// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
#endif
static unsigned long hugetlb_cma_size __initdata;

/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page. This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
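/*
 * Returns true if the subpool can be released: no handles are held and
 * either no pages are in use (maximum size accounting) or all of the
 * minimum-size reserves have been returned (minimum size accounting).
 */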
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request. Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward). The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool. Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock(&spool->lock);
	return ret;
}
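/*
 * Worked example of the minimum size accounting above (hypothetical
 * values): a subpool created with min_hpages = 10 starts with
 * rsv_hpages = 10. A get of delta = 3 is fully covered by the reserve
 * (returns 0, rsv_hpages becomes 7). Later, with rsv_hpages down to 2,
 * a get of delta = 3 returns 1: only one page must be taken from the
 * global pool, and rsv_hpages drops to 0.
 */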
/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return delta;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg = NULL;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}
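/*
 * Adjacent regions may only be coalesced if they charge the same
 * hugetlb cgroup reservation counter; without CONFIG_CGROUP_HUGETLB
 * there is nothing to compare.
 */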
static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg && org &&
	       rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg = NULL, *prg = NULL;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		kfree(rg);
	}
}

static inline long
hugetlb_resv_map_add(struct resv_map *map, struct file_region *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg->link.prev);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}
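/*
 * Illustrative example for add_reservation_in_range() below
 * (hypothetical map contents): with existing regions [2, 5) and
 * [7, 10), adding the range [0, 12) creates entries for [0, 2),
 * [5, 7) and [10, 12), which then coalesce with their neighbours,
 * and the function returns 6, the number of pages that were not
 * already represented.
 */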
/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. In that case,
 * regions_needed will indicate the number of file_regions needed in the
 * cache to carry out adding the regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *rg = NULL, *trg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, rg->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(rg, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (rg->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (rg->to > last_accounted_offset)
				last_accounted_offset = rg->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (rg->from >= t)
			break;

		/* Add an entry for last_accounted_offset -> rg->from, and
		 * update last_accounted_offset.
		 */
		if (rg->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, rg,
						    last_accounted_offset,
						    rg->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = rg->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	VM_BUG_ON(add < 0);
	return add;
}

/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	struct list_head allocated_regions;
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	INIT_LIST_HEAD(&allocated_regions);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}
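/*
 * Sketch of how the region_* routines below pair up in practice
 * (error handling elided):
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	...
 *	if (the operation failed)
 *		region_abort(resv, f, t, regions_needed);
 *	else
 *		region_add(resv, f, t, regions_needed, h, h_cg);
 */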
/*
 * Add the huge page range represented by [f, t) to the reserve
 * map. Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del. The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map. This number is greater
 * than or equal to zero. If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocates file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	VM_BUG_ON(add < 0);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented. This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t). region_chg does
 * not change the number of huge pages represented by the
 * map. A number of new file_region structures is added to the cache as a
 * placeholder, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress. This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t). This number is greater than or
 * equal to zero. -ENOMEM is returned if a new file_region structure or
 * cache entry is needed and cannot be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}
/*
 * Abort the in progress add operation. The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add. Operations are sometimes
 * aborted after the call to region_chg. In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call; it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine. They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}
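/*
 * Illustrative example for region_del() below (hypothetical map
 * contents): deleting [4, 6) from a map holding the single region
 * [2, 10) trims the existing entry to [2, 4), inserts a new entry for
 * [6, 10), and returns 2.
 */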
/*
 * Delete the specified range [f, t) from the reserve map. If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted. Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more. In the
 * case where a region must be split, a new region descriptor must
 * be allocated. If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM. Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted. file_region
		 * ranges are normally of the form [from, to). However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to. Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
						       struct file_region,
						       link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page. The huge page itself was freed
 * and removed from the page cache. This routine will adjust the subpool
 * usage count, and the global reserve count if needed. By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust) {
		struct hstate *h = hstate_inode(inode);

		hugetlb_acct_memory(h, 1);
	}
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}
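/*
 * Example for vma_hugecache_offset() below (hypothetical values, 2MB
 * huge pages on a 4K base page kernel): for a VMA with vm_pgoff == 1024
 * (4MB into the file in base pages), an address 6MB past vm_start maps
 * to huge page offset (6MB >> 21) + (1024 >> 9) = 3 + 2 = 5.
 */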
/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping. A shared mapping has a region map associated
 * with the underlying file, this region map represents the backing file
 * pages which have ever had a reservation assigned; this persists even
 * after the page is instantiated. A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it, this region map represents those offsets which have consumed
 * reservation ie. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode. This original address space
	 * contains the pointer to the resv_map. So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data but
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->private_data;
}
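/*
 * Look up the reserve map for a hugetlb VMA: shared mappings keep it in
 * the inode's address space, while private mappings store it in
 * vm_private_data alongside the HPAGE_RESV_* flag bits.
 */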
static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the releasing step. Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set. Therefore, there SHOULD
		 * be a region map for all pages. The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate. In this case, there really are no reserves to
		 * use. This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves(). The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings. vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account. Therefore, the meaning of chg is the same
		 * as in the shared case above. Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}
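/*
 * Place a free huge page on its node's free list and update the free
 * page counts. Called with hugetlb_lock held.
 */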
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	SetHPageFreed(page);
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
	struct page *page;
	bool nocma = !!(current->flags & PF_MEMALLOC_NOCMA);

	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
		if (nocma && is_migrate_cma_page(page))
			continue;

		if (PageHWPoison(page))
			continue;

		list_move(&page->lru, &h->hugepage_activelist);
		set_page_refcounted(page);
		ClearHPageFreed(page);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return page;
	}

	return NULL;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
							nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct page *page;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node. Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		page = dequeue_huge_page_node_exact(h, node);
		if (page)
			return page;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetHPageRestoreReserve(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}
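/*
 * Example of the round-robin helpers below (hypothetical state): with
 * nodes_allowed = {0, 2} and h->next_nid_to_alloc == 1,
 * hstate_next_node_to_alloc() returns 2 and advances next_nid_to_alloc
 * to 0, so successive allocations alternate between the two nodes.
 */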
/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed. Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page. Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	atomic_set(compound_mapcount_ptr(page), 0);
	atomic_set(compound_pincount_ptr(page), 0);

	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}

	set_compound_order(page, 0);
	page[1].compound_nr = 0;
	__ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
		return;
#endif

	free_contig_range(page_to_pfn(page), 1 << order);
}
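/*
 * Allocate a gigantic page as one contiguous range: the CMA areas
 * reserved for hugetlb, if configured, are tried first, before falling
 * back to alloc_contig_pages().
 */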
#ifdef CONFIG_CONTIG_ALLOC
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	unsigned long nr_pages = 1UL << huge_page_order(h);
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		struct page *page;
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page;
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page;
			}
		}
	}
#endif

	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);
#else /* !CONFIG_CONTIG_ALLOC */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
#endif

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;
	struct page *subpage = page;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h);
	     i++, subpage = mem_map_next(subpage, page, i)) {
		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
	set_page_refcounted(page);
	if (hstate_is_gigantic(h)) {
		/*
		 * Temporarily drop the hugetlb_lock, because
		 * we might block in free_gigantic_page().
		 */
		spin_unlock(&hugetlb_lock);
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
		spin_lock(&hugetlb_lock);
	} else {
		__free_pages(page, huge_page_order(h));
	}
}
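/* Return the hstate for the given huge page size, or NULL if none matches. */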
struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

static void __free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool = hugetlb_page_subpool(page);
	bool restore_reserve;

	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page), page);

	hugetlb_set_page_subpool(page, NULL);
	page->mapping = NULL;
	restore_reserve = HPageRestoreReserve(page);
	ClearHPageRestoreReserve(page);

	/*
	 * If HPageRestoreReserve was set on page, page allocation consumed a
	 * reservation. If the page was associated with a subpool, there
	 * would have been a page reserved in the subpool before allocation
	 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
	 * reservation, do not call hugepage_subpool_put_pages() as this will
	 * remove the reserved page from the subpool.
	 */
	if (!restore_reserve) {
		/*
		 * A return code of zero implies that the subpool will be
		 * under its minimum size if the reservation is not restored
		 * after the page is freed. Therefore, force the
		 * restore_reserve operation.
		 */
		if (hugepage_subpool_put_pages(spool, 1) == 0)
			restore_reserve = true;
	}

	spin_lock(&hugetlb_lock);
	ClearHPageMigratable(page);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
					  pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (HPageTemporary(page)) {
		list_del(&page->lru);
		ClearHPageTemporary(page);
		update_and_free_page(h, page);
	} else if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}

/*
 * As free_huge_page() can be called from a non-task context, we have
 * to defer the actual freeing to a workqueue to prevent potential
 * hugetlb_lock deadlock.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to
 * be freed and frees them one-by-one. As the page->mapping pointer is
 * going to be cleared in __free_huge_page() anyway, it is reused as the
 * llist_node structure of a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;
	struct page *page;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		page = container_of((struct address_space **)node,
				     struct page, mapping);
		node = node->next;
		__free_huge_page(page);
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
void free_huge_page(struct page *page)
{
	/*
	 * Defer freeing if in non-task context to avoid hugetlb_lock deadlock.
	 */
	if (!in_task()) {
		/*
		 * Only call schedule_work() if hpage_freelist is previously
		 * empty. Otherwise, schedule_work() had been called but the
		 * workfn hasn't retrieved the list yet.
		 */
		if (llist_add((struct llist_node *)&page->mapping,
			      &hpage_freelist))
			schedule_work(&free_hpage_work);
		return;
	}

	__free_huge_page(page);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	hugetlb_set_page_subpool(page, NULL);
	set_hugetlb_cgroup(page, NULL);
	set_hugetlb_cgroup_rsvd(page, NULL);
	spin_lock(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	ClearHPageFreed(page);
	spin_unlock(&hugetlb_lock);
}

static void prep_compound_gigantic_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__ClearPageReserved(page);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too. Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set). Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need to know if put_page() is
		 * needed after get_user_pages().
		 */
		__ClearPageReserved(p);
		set_page_count(p, 0);
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
	atomic_set(compound_pincount_ptr(page), 0);
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages. See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
}

/*
 * Find and lock address space (mapping) in write mode.
 *
 * Upon entry, the page is locked which means that page_mapping() is
 * stable. Due to locking order, we can only trylock_write. If we
 * cannot get the lock, simply return NULL to the caller.
 */
struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
{
	struct address_space *mapping = page_mapping(hpage);

	if (!mapping)
		return mapping;

	if (i_mmap_trylock_write(mapping))
		return mapping;

	return NULL;
}
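/*
 * Example for __basepage_index() below (hypothetical values, 2MB huge
 * pages with 4K base pages): the subpage at offset 5 within a huge page
 * at huge page index 3 has base page index (3 << 9) + 5 = 1541.
 */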
pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}

static struct page *alloc_buddy_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	int order = huge_page_order(h);
	struct page *page;
	bool alloc_try_hard = true;

	/*
	 * By default we always try hard to allocate the page with
	 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in
	 * a loop (to adjust global huge page counts) and previous allocation
	 * failed, do not continue to try hard on the same node. Use the
	 * node_alloc_noretry bitmap to manage this state information.
	 */
	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
		alloc_try_hard = false;
	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
	if (alloc_try_hard)
		gfp_mask |= __GFP_RETRY_MAYFAIL;
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
	if (page)
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	/*
	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
	 * indicates an overall state change. Clear bit so that we resume
	 * normal 'try hard' allocations.
	 */
	if (node_alloc_noretry && page && !alloc_try_hard)
		node_clear(nid, *node_alloc_noretry);

	/*
	 * If we tried hard to get a page but failed, set bit so that
	 * subsequent attempts will not try as hard until there is an
	 * overall state change.
	 */
	if (node_alloc_noretry && !page && alloc_try_hard)
		node_set(nid, *node_alloc_noretry);

	return page;
}

/*
 * Common helper to allocate a fresh hugetlb page. All specific allocators
 * should use this function to get new hugetlb pages.
 */
static struct page *alloc_fresh_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
	else
		page = alloc_buddy_huge_page(h, gfp_mask,
				nid, nmask, node_alloc_noretry);
	if (!page)
		return NULL;

	if (hstate_is_gigantic(h))
		prep_compound_gigantic_page(page, huge_page_order(h));
	prep_new_huge_page(h, page, page_to_nid(page));

	return page;
}
/*
 * Allocates a fresh page to the hugetlb allocator pool in a
 * node-interleaved manner.
 */
static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
				nodemask_t *node_alloc_noretry)
{
	struct page *page;
	int nr_nodes, node;
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
					     node_alloc_noretry);
		if (page)
			break;
	}

	if (!page)
		return 0;

	put_page(page); /* free it into the hugepage allocator */

	return 1;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
				bool acct_surplus)
{
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			struct page *page =
				list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[node]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[node]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
	}

	return ret;
}
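/*
 * Note that dissolve_free_huge_page() below is called without
 * hugetlb_lock held; it takes and drops the lock internally, which is
 * why dissolve_free_huge_pages() can simply call it in a loop.
 */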
/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use hugepages and non-hugepages.
 * This function returns values like below:
 *
 * -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
 *	   (allocated or reserved.)
 * 0:	   successfully dissolved free hugepages or the page is not a
 *	   hugepage (considered as already dissolved)
 */
int dissolve_free_huge_page(struct page *page)
{
	int rc = -EBUSY;

retry:
	/* Not to disrupt normal path by vainly holding hugetlb_lock */
	if (!PageHuge(page))
		return 0;

	spin_lock(&hugetlb_lock);
	if (!PageHuge(page)) {
		rc = 0;
		goto out;
	}

	if (!page_count(page)) {
		struct page *head = compound_head(page);
		struct hstate *h = page_hstate(head);
		int nid = page_to_nid(head);
		if (h->free_huge_pages - h->resv_huge_pages == 0)
			goto out;

		/*
		 * We should make sure that the page is already on the free
		 * list when it is dissolved.
		 */
		if (unlikely(!HPageFreed(head))) {
			spin_unlock(&hugetlb_lock);
			cond_resched();

			/*
			 * Theoretically, we should return -EBUSY when we
			 * encounter this race. In fact, we have a chance
			 * to successfully dissolve the page if we do a
			 * retry, because the race window is quite small.
			 * Seizing this opportunity increases the success
			 * rate of dissolving pages.
			 */
			goto retry;
		}

		/*
		 * Move the PageHWPoison flag from the head page to the raw
		 * error page, which makes any subpages other than the error
		 * page reusable.
		 */
		if (PageHWPoison(head) && page != head) {
			SetPageHWPoison(page);
			ClearPageHWPoison(head);
		}
		list_del(&head->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		h->max_huge_pages--;
		update_and_free_page(h, head);
		rc = 0;
	}
out:
	spin_unlock(&hugetlb_lock);
	return rc;
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;

	if (!hugepages_supported())
		return rc;

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
		page = pfn_to_page(pfn);
		rc = dissolve_free_huge_page(page);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nmask)
{
	struct page *page = NULL;

	if (hstate_is_gigantic(h))
		return NULL;

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock(&hugetlb_lock);

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
	if (!page)
		return NULL;

	spin_lock(&hugetlb_lock);
	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommitting the surpluses. Abuse
	 * the temporary page to work around the nasty free_huge_page
	 * codeflow.
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		SetHPageTemporary(page);
		spin_unlock(&hugetlb_lock);
		put_page(page);
		return NULL;
	} else {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[page_to_nid(page)]++;
	}

out_unlock:
	spin_unlock(&hugetlb_lock);

	return page;
}

static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		return NULL;

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
	if (!page)
		return NULL;

	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference.
	 */
	SetHPageTemporary(page);

	return page;
}
1877 */ 1878 static 1879 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, 1880 struct vm_area_struct *vma, unsigned long addr) 1881 { 1882 struct page *page; 1883 struct mempolicy *mpol; 1884 gfp_t gfp_mask = htlb_alloc_mask(h); 1885 int nid; 1886 nodemask_t *nodemask; 1887 1888 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); 1889 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask); 1890 mpol_cond_put(mpol); 1891 1892 return page; 1893 } 1894 1895 /* page migration callback function */ 1896 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, 1897 nodemask_t *nmask, gfp_t gfp_mask) 1898 { 1899 spin_lock(&hugetlb_lock); 1900 if (h->free_huge_pages - h->resv_huge_pages > 0) { 1901 struct page *page; 1902 1903 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask); 1904 if (page) { 1905 spin_unlock(&hugetlb_lock); 1906 return page; 1907 } 1908 } 1909 spin_unlock(&hugetlb_lock); 1910 1911 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask); 1912 } 1913 1914 /* mempolicy aware migration callback */ 1915 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, 1916 unsigned long address) 1917 { 1918 struct mempolicy *mpol; 1919 nodemask_t *nodemask; 1920 struct page *page; 1921 gfp_t gfp_mask; 1922 int node; 1923 1924 gfp_mask = htlb_alloc_mask(h); 1925 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); 1926 page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask); 1927 mpol_cond_put(mpol); 1928 1929 return page; 1930 } 1931 1932 /* 1933 * Increase the hugetlb pool such that it can accommodate a reservation 1934 * of size 'delta'. 1935 */ 1936 static int gather_surplus_pages(struct hstate *h, long delta) 1937 __must_hold(&hugetlb_lock) 1938 { 1939 struct list_head surplus_list; 1940 struct page *page, *tmp; 1941 int ret; 1942 long i; 1943 long needed, allocated; 1944 bool alloc_ok = true; 1945 1946 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 1947 if (needed <= 0) { 1948 h->resv_huge_pages += delta; 1949 return 0; 1950 } 1951 1952 allocated = 0; 1953 INIT_LIST_HEAD(&surplus_list); 1954 1955 ret = -ENOMEM; 1956 retry: 1957 spin_unlock(&hugetlb_lock); 1958 for (i = 0; i < needed; i++) { 1959 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h), 1960 NUMA_NO_NODE, NULL); 1961 if (!page) { 1962 alloc_ok = false; 1963 break; 1964 } 1965 list_add(&page->lru, &surplus_list); 1966 cond_resched(); 1967 } 1968 allocated += i; 1969 1970 /* 1971 * After retaking hugetlb_lock, we need to recalculate 'needed' 1972 * because either resv_huge_pages or free_huge_pages may have changed. 1973 */ 1974 spin_lock(&hugetlb_lock); 1975 needed = (h->resv_huge_pages + delta) - 1976 (h->free_huge_pages + allocated); 1977 if (needed > 0) { 1978 if (alloc_ok) 1979 goto retry; 1980 /* 1981 * We were not able to allocate enough pages to 1982 * satisfy the entire reservation so we free what 1983 * we've allocated so far. 1984 */ 1985 goto free; 1986 } 1987 /* 1988 * The surplus_list now contains _at_least_ the number of extra pages 1989 * needed to accommodate the reservation. Add the appropriate number 1990 * of pages to the hugetlb pool and free the extras back to the buddy 1991 * allocator. Commit the entire reservation here to prevent another 1992 * process from stealing the pages as they are added to the pool but 1993 * before they are reserved. 
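	 *
	 * (Illustrative numbers: with resv_huge_pages == 10, delta == 5 and
	 *  free_huge_pages == 12, needed starts at 3.  If the loop above
	 *  allocated those 3 pages and no counters changed meanwhile, the
	 *  recomputed needed is 0, and needed += allocated below leaves 3
	 *  pages to enqueue and none to hand back to the buddy allocator.)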
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		int zeroed;

		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		zeroed = put_page_testzero(page);
		VM_BUG_ON_PAGE(!zeroed, page);
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
		put_page(page);
	spin_lock(&hugetlb_lock);

	return ret;
}

/*
 * This routine has two main purposes:
 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
 *    in unused_resv_pages.  This corresponds to the prior adjustments made
 *    to the associated reservation map.
 * 2) Free any unused surplus pages that may have been allocated to satisfy
 *    the reservation.  As many as unused_resv_pages may be freed.
 *
 * Called with hugetlb_lock held.  However, the lock could be dropped (and
 * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
 * we must make sure nobody else can claim pages we are in the process of
 * freeing.  Do this by ensuring resv_huge_pages is always greater than the
 * number of huge pages we plan to free when dropping the lock.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Cannot return gigantic pages currently */
	if (hstate_is_gigantic(h))
		goto out;

	/*
	 * Part (or even all) of the reservation could have been backed
	 * by pre-allocated pages. Only free surplus pages.
	 */
	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 *
	 * Note that we decrement resv_huge_pages as we free the pages.  If
	 * we drop the lock, resv_huge_pages will still be sufficiently large
	 * to cover subsequent pages we may free.
	 */
	while (nr_pages--) {
		h->resv_huge_pages--;
		unused_resv_pages--;
		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
			goto out;
		cond_resched_lock(&hugetlb_lock);
	}

out:
	/* Fully uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;
}


/*
 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
 * are used by the huge page allocation routines to manage reservations.
 *
 * vma_needs_reservation is called to determine if the huge page at addr
 * within the vma has an associated reservation.  If a reservation is
 * needed, the value 1 is returned.  The caller is then responsible for
 * managing the global reservation and subpool usage counts.
 * After the huge page has been allocated, vma_commit_reservation is called
 * to add the page to the reservation map.  If the page allocation fails,
 * the reservation must be ended instead of committed.  vma_end_reservation
 * is called in such cases.
 *
 * In the normal case, vma_commit_reservation returns the same value
 * as the preceding vma_needs_reservation call.  The only time this
 * is not the case is if a reserve map was changed between calls.  It
 * is the responsibility of the caller to notice the difference and
 * take appropriate action.
 *
 * vma_add_reservation is used in error paths where a reservation must
 * be restored when a newly allocated huge page must be freed.  It is
 * to be called after calling vma_needs_reservation to determine if a
 * reservation exists.
 */
enum vma_resv_mode {
	VMA_NEEDS_RESV,
	VMA_COMMIT_RESV,
	VMA_END_RESV,
	VMA_ADD_RESV,
};
static long __vma_reservation_common(struct hstate *h,
				struct vm_area_struct *vma, unsigned long addr,
				enum vma_resv_mode mode)
{
	struct resv_map *resv;
	pgoff_t idx;
	long ret;
	long dummy_out_regions_needed;

	resv = vma_resv_map(vma);
	if (!resv)
		return 1;

	idx = vma_hugecache_offset(h, vma, addr);
	switch (mode) {
	case VMA_NEEDS_RESV:
		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
		/* We assume that vma_reservation_* routines always operate on
		 * 1 page, and that adding to resv map a 1 page entry can only
		 * ever require 1 region.
		 */
		VM_BUG_ON(dummy_out_regions_needed != 1);
		break;
	case VMA_COMMIT_RESV:
		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
		/* region_add calls of range 1 should never fail. */
		VM_BUG_ON(ret < 0);
		break;
	case VMA_END_RESV:
		region_abort(resv, idx, idx + 1, 1);
		ret = 0;
		break;
	case VMA_ADD_RESV:
		if (vma->vm_flags & VM_MAYSHARE) {
			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
			/* region_add calls of range 1 should never fail. */
			VM_BUG_ON(ret < 0);
		} else {
			region_abort(resv, idx, idx + 1, 1);
			ret = region_del(resv, idx, idx + 1);
		}
		break;
	default:
		BUG();
	}

	if (vma->vm_flags & VM_MAYSHARE)
		return ret;
	else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
		/*
		 * In most cases, reserves always exist for private mappings.
		 * However, a file associated with the mapping could have been
		 * hole punched or truncated after reserves were consumed, so
		 * a subsequent fault on such a range will not use reserves.
		 * Subtle - The reserve map for private mappings has the
		 * opposite meaning than that of shared mappings.  If NO
		 * entry is in the reserve map, it means a reservation exists.
		 * If an entry exists in the reserve map, it means the
		 * reservation has already been consumed.  As a result, the
		 * return value of this routine is the opposite of the
		 * value returned from reserve map manipulation routines above.
		 */
		if (ret)
			return 0;
		else
			return 1;
	}
	else
		return ret < 0 ?
ret : 0; 2178 } 2179 2180 static long vma_needs_reservation(struct hstate *h, 2181 struct vm_area_struct *vma, unsigned long addr) 2182 { 2183 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 2184 } 2185 2186 static long vma_commit_reservation(struct hstate *h, 2187 struct vm_area_struct *vma, unsigned long addr) 2188 { 2189 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 2190 } 2191 2192 static void vma_end_reservation(struct hstate *h, 2193 struct vm_area_struct *vma, unsigned long addr) 2194 { 2195 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 2196 } 2197 2198 static long vma_add_reservation(struct hstate *h, 2199 struct vm_area_struct *vma, unsigned long addr) 2200 { 2201 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 2202 } 2203 2204 /* 2205 * This routine is called to restore a reservation on error paths. In the 2206 * specific error paths, a huge page was allocated (via alloc_huge_page) 2207 * and is about to be freed. If a reservation for the page existed, 2208 * alloc_huge_page would have consumed the reservation and set 2209 * HPageRestoreReserve in the newly allocated page. When the page is freed 2210 * via free_huge_page, the global reservation count will be incremented if 2211 * HPageRestoreReserve is set. However, free_huge_page can not adjust the 2212 * reserve map. Adjust the reserve map here to be consistent with global 2213 * reserve count adjustments to be made by free_huge_page. 2214 */ 2215 static void restore_reserve_on_error(struct hstate *h, 2216 struct vm_area_struct *vma, unsigned long address, 2217 struct page *page) 2218 { 2219 if (unlikely(HPageRestoreReserve(page))) { 2220 long rc = vma_needs_reservation(h, vma, address); 2221 2222 if (unlikely(rc < 0)) { 2223 /* 2224 * Rare out of memory condition in reserve map 2225 * manipulation. Clear HPageRestoreReserve so that 2226 * global reserve count will not be incremented 2227 * by free_huge_page. This will make it appear 2228 * as though the reservation for this page was 2229 * consumed. This may prevent the task from 2230 * faulting in the page at a later time. This 2231 * is better than inconsistent global huge page 2232 * accounting of reserve counts. 2233 */ 2234 ClearHPageRestoreReserve(page); 2235 } else if (rc) { 2236 rc = vma_add_reservation(h, vma, address); 2237 if (unlikely(rc < 0)) 2238 /* 2239 * See above comment about rare out of 2240 * memory condition. 2241 */ 2242 ClearHPageRestoreReserve(page); 2243 } else 2244 vma_end_reservation(h, vma, address); 2245 } 2246 } 2247 2248 struct page *alloc_huge_page(struct vm_area_struct *vma, 2249 unsigned long addr, int avoid_reserve) 2250 { 2251 struct hugepage_subpool *spool = subpool_vma(vma); 2252 struct hstate *h = hstate_vma(vma); 2253 struct page *page; 2254 long map_chg, map_commit; 2255 long gbl_chg; 2256 int ret, idx; 2257 struct hugetlb_cgroup *h_cg; 2258 bool deferred_reserve; 2259 2260 idx = hstate_index(h); 2261 /* 2262 * Examine the region/reserve map to determine if the process 2263 * has a reservation for the page to be allocated. A return 2264 * code of zero indicates a reservation exists (no change). 2265 */ 2266 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); 2267 if (map_chg < 0) 2268 return ERR_PTR(-ENOMEM); 2269 2270 /* 2271 * Processes that did not create the mapping will have no 2272 * reserves as indicated by the region/reserve map. Check 2273 * that the allocation will not exceed the subpool limit. 
	 * Allocations for MAP_NORESERVE mappings also need to be
	 * checked against any subpool limit.
	 */
	if (map_chg || avoid_reserve) {
		gbl_chg = hugepage_subpool_get_pages(spool, 1);
		if (gbl_chg < 0) {
			vma_end_reservation(h, vma, addr);
			return ERR_PTR(-ENOSPC);
		}

		/*
		 * Even though there was no reservation in the region/reserve
		 * map, there could be reservations associated with the
		 * subpool that can be used.  This would be indicated if the
		 * return value of hugepage_subpool_get_pages() is zero.
		 * However, if avoid_reserve is specified we still avoid even
		 * the subpool reservations.
		 */
		if (avoid_reserve)
			gbl_chg = 1;
	}

	/* If this allocation is not consuming a reservation, charge it now.
	 */
	deferred_reserve = map_chg || avoid_reserve || !vma_resv_map(vma);
	if (deferred_reserve) {
		ret = hugetlb_cgroup_charge_cgroup_rsvd(
			idx, pages_per_huge_page(h), &h_cg);
		if (ret)
			goto out_subpool_put;
	}

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret)
		goto out_uncharge_cgroup_reservation;

	spin_lock(&hugetlb_lock);
	/*
	 * gbl_chg is passed to indicate whether or not a page must be taken
	 * from the global free pool (global change).  gbl_chg == 0 indicates
	 * a reservation exists for the allocation.
	 */
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
	if (!page) {
		spin_unlock(&hugetlb_lock);
		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
		if (!page)
			goto out_uncharge_cgroup;
		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
			SetHPageRestoreReserve(page);
			h->resv_huge_pages--;
		}
		spin_lock(&hugetlb_lock);
		list_add(&page->lru, &h->hugepage_activelist);
		/* Fall through */
	}
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	/* If allocation is not consuming a reservation, also store the
	 * hugetlb_cgroup pointer on the page.
	 */
	if (deferred_reserve) {
		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
						  h_cg, page);
	}

	spin_unlock(&hugetlb_lock);

	hugetlb_set_page_subpool(page, spool);

	map_commit = vma_commit_reservation(h, vma, addr);
	if (unlikely(map_chg > map_commit)) {
		/*
		 * The page was added to the reservation map between
		 * vma_needs_reservation and vma_commit_reservation.
		 * This indicates a race with hugetlb_reserve_pages.
		 * Adjust for the subpool count incremented above AND
		 * in hugetlb_reserve_pages for the same page.  Also,
		 * the reservation count added in hugetlb_reserve_pages
		 * no longer applies.
		 */
		long rsv_adjust;

		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
		hugetlb_acct_memory(h, -rsv_adjust);
		if (deferred_reserve)
			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
					pages_per_huge_page(h), page);
	}
	return page;

out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
out_uncharge_cgroup_reservation:
	if (deferred_reserve)
		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
						    h_cg);
out_subpool_put:
	if (map_chg || avoid_reserve)
		hugepage_subpool_put_pages(spool, 1);
	vma_end_reservation(h, vma, addr);
	return ERR_PTR(-ENOSPC);
}

int alloc_bootmem_huge_page(struct hstate *h)
	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
int __alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
		void *addr;

		addr = memblock_alloc_try_nid_raw(
				huge_page_size(h), huge_page_size(h),
				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
	}
	return 0;

found:
	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
	/* Put them into a private list first because mem_map is not up yet */
	INIT_LIST_HEAD(&m->list);
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

static void __init prep_compound_huge_page(struct page *page,
		unsigned int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct hstate *h = m->hstate;

		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, huge_page_order(h));
		WARN_ON(PageReserved(page));
		prep_new_huge_page(h, page, page_to_nid(page));
		put_page(page); /* free it into the hugepage allocator */

		/*
		 * If we had gigantic hugepages allocated at boot time, we need
		 * to restore the 'stolen' pages to totalram_pages in order to
		 * fix confusing memory reports from free(1) and other side
		 * effects, like CommitLimit going negative.
		 */
		if (hstate_is_gigantic(h))
			adjust_managed_page_count(page, pages_per_huge_page(h));
		cond_resched();
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;
	nodemask_t *node_alloc_noretry;

	if (!hstate_is_gigantic(h)) {
		/*
		 * Bit mask controlling how hard we retry per-node allocations.
		 * Ignore errors as lower level routines can deal with
		 * node_alloc_noretry == NULL.  If this kmalloc fails at boot
		 * time, we are likely in bigger trouble.
2458 */ 2459 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry), 2460 GFP_KERNEL); 2461 } else { 2462 /* allocations done at boot time */ 2463 node_alloc_noretry = NULL; 2464 } 2465 2466 /* bit mask controlling how hard we retry per-node allocations */ 2467 if (node_alloc_noretry) 2468 nodes_clear(*node_alloc_noretry); 2469 2470 for (i = 0; i < h->max_huge_pages; ++i) { 2471 if (hstate_is_gigantic(h)) { 2472 if (hugetlb_cma_size) { 2473 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); 2474 goto free; 2475 } 2476 if (!alloc_bootmem_huge_page(h)) 2477 break; 2478 } else if (!alloc_pool_huge_page(h, 2479 &node_states[N_MEMORY], 2480 node_alloc_noretry)) 2481 break; 2482 cond_resched(); 2483 } 2484 if (i < h->max_huge_pages) { 2485 char buf[32]; 2486 2487 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 2488 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n", 2489 h->max_huge_pages, buf, i); 2490 h->max_huge_pages = i; 2491 } 2492 free: 2493 kfree(node_alloc_noretry); 2494 } 2495 2496 static void __init hugetlb_init_hstates(void) 2497 { 2498 struct hstate *h; 2499 2500 for_each_hstate(h) { 2501 if (minimum_order > huge_page_order(h)) 2502 minimum_order = huge_page_order(h); 2503 2504 /* oversize hugepages were init'ed in early boot */ 2505 if (!hstate_is_gigantic(h)) 2506 hugetlb_hstate_alloc_pages(h); 2507 } 2508 VM_BUG_ON(minimum_order == UINT_MAX); 2509 } 2510 2511 static void __init report_hugepages(void) 2512 { 2513 struct hstate *h; 2514 2515 for_each_hstate(h) { 2516 char buf[32]; 2517 2518 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 2519 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n", 2520 buf, h->free_huge_pages); 2521 } 2522 } 2523 2524 #ifdef CONFIG_HIGHMEM 2525 static void try_to_free_low(struct hstate *h, unsigned long count, 2526 nodemask_t *nodes_allowed) 2527 { 2528 int i; 2529 2530 if (hstate_is_gigantic(h)) 2531 return; 2532 2533 for_each_node_mask(i, *nodes_allowed) { 2534 struct page *page, *next; 2535 struct list_head *freel = &h->hugepage_freelists[i]; 2536 list_for_each_entry_safe(page, next, freel, lru) { 2537 if (count >= h->nr_huge_pages) 2538 return; 2539 if (PageHighMem(page)) 2540 continue; 2541 list_del(&page->lru); 2542 update_and_free_page(h, page); 2543 h->free_huge_pages--; 2544 h->free_huge_pages_node[page_to_nid(page)]--; 2545 } 2546 } 2547 } 2548 #else 2549 static inline void try_to_free_low(struct hstate *h, unsigned long count, 2550 nodemask_t *nodes_allowed) 2551 { 2552 } 2553 #endif 2554 2555 /* 2556 * Increment or decrement surplus_huge_pages. Keep node-specific counters 2557 * balanced by operating on them in a round-robin fashion. 2558 * Returns 1 if an adjustment was made. 
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int nr_nodes, node;

	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0) {
		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node])
				goto found;
		}
	} else {
		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node] <
					h->nr_huge_pages_node[node])
				goto found;
		}
	}
	return 0;

found:
	h->surplus_huge_pages += delta;
	h->surplus_huge_pages_node[node] += delta;
	return 1;
}

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
			      nodemask_t *nodes_allowed)
{
	unsigned long min_count, ret;
	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);

	/*
	 * Bit mask controlling how hard we retry per-node allocations.
	 * If we can not allocate the bit mask, do not attempt to allocate
	 * the requested huge pages.
	 */
	if (node_alloc_noretry)
		nodes_clear(*node_alloc_noretry);
	else
		return -ENOMEM;

	spin_lock(&hugetlb_lock);

	/*
	 * Check for a node specific request.
	 * Changing node specific huge page count may require a corresponding
	 * change to the global count.  In any case, the passed node mask
	 * (nodes_allowed) will restrict alloc/free to the specified node.
	 */
	if (nid != NUMA_NO_NODE) {
		unsigned long old_count = count;

		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
		/*
		 * User may have specified a large count value which caused the
		 * above calculation to overflow.  In this case, they wanted
		 * to allocate as many huge pages as possible.  Set count to
		 * largest possible value to align with their intention.
		 */
		if (count < old_count)
			count = ULONG_MAX;
	}

	/*
	 * Runtime allocation of gigantic pages depends on the capability
	 * for large page range allocation.
	 * If the system does not provide this feature, return an error when
	 * the user tries to allocate gigantic pages but let the user free the
	 * boot-time allocated gigantic pages.
	 */
	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
		if (count > persistent_huge_pages(h)) {
			spin_unlock(&hugetlb_lock);
			NODEMASK_FREE(node_alloc_noretry);
			return -EINVAL;
		}
		/* Fall through to decrease pool */
	}

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_surplus_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
2663 */ 2664 spin_unlock(&hugetlb_lock); 2665 2666 /* yield cpu to avoid soft lockup */ 2667 cond_resched(); 2668 2669 ret = alloc_pool_huge_page(h, nodes_allowed, 2670 node_alloc_noretry); 2671 spin_lock(&hugetlb_lock); 2672 if (!ret) 2673 goto out; 2674 2675 /* Bail for signals. Probably ctrl-c from user */ 2676 if (signal_pending(current)) 2677 goto out; 2678 } 2679 2680 /* 2681 * Decrease the pool size 2682 * First return free pages to the buddy allocator (being careful 2683 * to keep enough around to satisfy reservations). Then place 2684 * pages into surplus state as needed so the pool will shrink 2685 * to the desired size as pages become free. 2686 * 2687 * By placing pages into the surplus state independent of the 2688 * overcommit value, we are allowing the surplus pool size to 2689 * exceed overcommit. There are few sane options here. Since 2690 * alloc_surplus_huge_page() is checking the global counter, 2691 * though, we'll note that we're not allowed to exceed surplus 2692 * and won't grow the pool anywhere else. Not until one of the 2693 * sysctls are changed, or the surplus pages go out of use. 2694 */ 2695 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 2696 min_count = max(count, min_count); 2697 try_to_free_low(h, min_count, nodes_allowed); 2698 while (min_count < persistent_huge_pages(h)) { 2699 if (!free_pool_huge_page(h, nodes_allowed, 0)) 2700 break; 2701 cond_resched_lock(&hugetlb_lock); 2702 } 2703 while (count < persistent_huge_pages(h)) { 2704 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 2705 break; 2706 } 2707 out: 2708 h->max_huge_pages = persistent_huge_pages(h); 2709 spin_unlock(&hugetlb_lock); 2710 2711 NODEMASK_FREE(node_alloc_noretry); 2712 2713 return 0; 2714 } 2715 2716 #define HSTATE_ATTR_RO(_name) \ 2717 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 2718 2719 #define HSTATE_ATTR(_name) \ 2720 static struct kobj_attribute _name##_attr = \ 2721 __ATTR(_name, 0644, _name##_show, _name##_store) 2722 2723 static struct kobject *hugepages_kobj; 2724 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 2725 2726 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 2727 2728 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 2729 { 2730 int i; 2731 2732 for (i = 0; i < HUGE_MAX_HSTATE; i++) 2733 if (hstate_kobjs[i] == kobj) { 2734 if (nidp) 2735 *nidp = NUMA_NO_NODE; 2736 return &hstates[i]; 2737 } 2738 2739 return kobj_to_node_hstate(kobj, nidp); 2740 } 2741 2742 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 2743 struct kobj_attribute *attr, char *buf) 2744 { 2745 struct hstate *h; 2746 unsigned long nr_huge_pages; 2747 int nid; 2748 2749 h = kobj_to_hstate(kobj, &nid); 2750 if (nid == NUMA_NO_NODE) 2751 nr_huge_pages = h->nr_huge_pages; 2752 else 2753 nr_huge_pages = h->nr_huge_pages_node[nid]; 2754 2755 return sysfs_emit(buf, "%lu\n", nr_huge_pages); 2756 } 2757 2758 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 2759 struct hstate *h, int nid, 2760 unsigned long count, size_t len) 2761 { 2762 int err; 2763 nodemask_t nodes_allowed, *n_mask; 2764 2765 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 2766 return -EINVAL; 2767 2768 if (nid == NUMA_NO_NODE) { 2769 /* 2770 * global hstate attribute 2771 */ 2772 if (!(obey_mempolicy && 2773 init_nodemask_of_mempolicy(&nodes_allowed))) 2774 n_mask = &node_states[N_MEMORY]; 2775 else 2776 n_mask = &nodes_allowed; 2777 } else { 2778 /* 2779 * Node specific request. 
count adjustment happens in 2780 * set_max_huge_pages() after acquiring hugetlb_lock. 2781 */ 2782 init_nodemask_of_node(&nodes_allowed, nid); 2783 n_mask = &nodes_allowed; 2784 } 2785 2786 err = set_max_huge_pages(h, count, nid, n_mask); 2787 2788 return err ? err : len; 2789 } 2790 2791 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 2792 struct kobject *kobj, const char *buf, 2793 size_t len) 2794 { 2795 struct hstate *h; 2796 unsigned long count; 2797 int nid; 2798 int err; 2799 2800 err = kstrtoul(buf, 10, &count); 2801 if (err) 2802 return err; 2803 2804 h = kobj_to_hstate(kobj, &nid); 2805 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 2806 } 2807 2808 static ssize_t nr_hugepages_show(struct kobject *kobj, 2809 struct kobj_attribute *attr, char *buf) 2810 { 2811 return nr_hugepages_show_common(kobj, attr, buf); 2812 } 2813 2814 static ssize_t nr_hugepages_store(struct kobject *kobj, 2815 struct kobj_attribute *attr, const char *buf, size_t len) 2816 { 2817 return nr_hugepages_store_common(false, kobj, buf, len); 2818 } 2819 HSTATE_ATTR(nr_hugepages); 2820 2821 #ifdef CONFIG_NUMA 2822 2823 /* 2824 * hstate attribute for optionally mempolicy-based constraint on persistent 2825 * huge page alloc/free. 2826 */ 2827 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 2828 struct kobj_attribute *attr, 2829 char *buf) 2830 { 2831 return nr_hugepages_show_common(kobj, attr, buf); 2832 } 2833 2834 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 2835 struct kobj_attribute *attr, const char *buf, size_t len) 2836 { 2837 return nr_hugepages_store_common(true, kobj, buf, len); 2838 } 2839 HSTATE_ATTR(nr_hugepages_mempolicy); 2840 #endif 2841 2842 2843 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 2844 struct kobj_attribute *attr, char *buf) 2845 { 2846 struct hstate *h = kobj_to_hstate(kobj, NULL); 2847 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); 2848 } 2849 2850 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 2851 struct kobj_attribute *attr, const char *buf, size_t count) 2852 { 2853 int err; 2854 unsigned long input; 2855 struct hstate *h = kobj_to_hstate(kobj, NULL); 2856 2857 if (hstate_is_gigantic(h)) 2858 return -EINVAL; 2859 2860 err = kstrtoul(buf, 10, &input); 2861 if (err) 2862 return err; 2863 2864 spin_lock(&hugetlb_lock); 2865 h->nr_overcommit_huge_pages = input; 2866 spin_unlock(&hugetlb_lock); 2867 2868 return count; 2869 } 2870 HSTATE_ATTR(nr_overcommit_hugepages); 2871 2872 static ssize_t free_hugepages_show(struct kobject *kobj, 2873 struct kobj_attribute *attr, char *buf) 2874 { 2875 struct hstate *h; 2876 unsigned long free_huge_pages; 2877 int nid; 2878 2879 h = kobj_to_hstate(kobj, &nid); 2880 if (nid == NUMA_NO_NODE) 2881 free_huge_pages = h->free_huge_pages; 2882 else 2883 free_huge_pages = h->free_huge_pages_node[nid]; 2884 2885 return sysfs_emit(buf, "%lu\n", free_huge_pages); 2886 } 2887 HSTATE_ATTR_RO(free_hugepages); 2888 2889 static ssize_t resv_hugepages_show(struct kobject *kobj, 2890 struct kobj_attribute *attr, char *buf) 2891 { 2892 struct hstate *h = kobj_to_hstate(kobj, NULL); 2893 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); 2894 } 2895 HSTATE_ATTR_RO(resv_hugepages); 2896 2897 static ssize_t surplus_hugepages_show(struct kobject *kobj, 2898 struct kobj_attribute *attr, char *buf) 2899 { 2900 struct hstate *h; 2901 unsigned long surplus_huge_pages; 2902 int nid; 2903 2904 h = kobj_to_hstate(kobj, &nid); 2905 if (nid == 
NUMA_NO_NODE) 2906 surplus_huge_pages = h->surplus_huge_pages; 2907 else 2908 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 2909 2910 return sysfs_emit(buf, "%lu\n", surplus_huge_pages); 2911 } 2912 HSTATE_ATTR_RO(surplus_hugepages); 2913 2914 static struct attribute *hstate_attrs[] = { 2915 &nr_hugepages_attr.attr, 2916 &nr_overcommit_hugepages_attr.attr, 2917 &free_hugepages_attr.attr, 2918 &resv_hugepages_attr.attr, 2919 &surplus_hugepages_attr.attr, 2920 #ifdef CONFIG_NUMA 2921 &nr_hugepages_mempolicy_attr.attr, 2922 #endif 2923 NULL, 2924 }; 2925 2926 static const struct attribute_group hstate_attr_group = { 2927 .attrs = hstate_attrs, 2928 }; 2929 2930 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 2931 struct kobject **hstate_kobjs, 2932 const struct attribute_group *hstate_attr_group) 2933 { 2934 int retval; 2935 int hi = hstate_index(h); 2936 2937 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 2938 if (!hstate_kobjs[hi]) 2939 return -ENOMEM; 2940 2941 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 2942 if (retval) { 2943 kobject_put(hstate_kobjs[hi]); 2944 hstate_kobjs[hi] = NULL; 2945 } 2946 2947 return retval; 2948 } 2949 2950 static void __init hugetlb_sysfs_init(void) 2951 { 2952 struct hstate *h; 2953 int err; 2954 2955 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 2956 if (!hugepages_kobj) 2957 return; 2958 2959 for_each_hstate(h) { 2960 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 2961 hstate_kobjs, &hstate_attr_group); 2962 if (err) 2963 pr_err("HugeTLB: Unable to add hstate %s", h->name); 2964 } 2965 } 2966 2967 #ifdef CONFIG_NUMA 2968 2969 /* 2970 * node_hstate/s - associate per node hstate attributes, via their kobjects, 2971 * with node devices in node_devices[] using a parallel array. The array 2972 * index of a node device or _hstate == node id. 2973 * This is here to avoid any static dependency of the node device driver, in 2974 * the base kernel, on the hugetlb module. 2975 */ 2976 struct node_hstate { 2977 struct kobject *hugepages_kobj; 2978 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 2979 }; 2980 static struct node_hstate node_hstates[MAX_NUMNODES]; 2981 2982 /* 2983 * A subset of global hstate attributes for node devices 2984 */ 2985 static struct attribute *per_node_hstate_attrs[] = { 2986 &nr_hugepages_attr.attr, 2987 &free_hugepages_attr.attr, 2988 &surplus_hugepages_attr.attr, 2989 NULL, 2990 }; 2991 2992 static const struct attribute_group per_node_hstate_attr_group = { 2993 .attrs = per_node_hstate_attrs, 2994 }; 2995 2996 /* 2997 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 2998 * Returns node id via non-NULL nidp. 2999 */ 3000 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 3001 { 3002 int nid; 3003 3004 for (nid = 0; nid < nr_node_ids; nid++) { 3005 struct node_hstate *nhs = &node_hstates[nid]; 3006 int i; 3007 for (i = 0; i < HUGE_MAX_HSTATE; i++) 3008 if (nhs->hstate_kobjs[i] == kobj) { 3009 if (nidp) 3010 *nidp = nid; 3011 return &hstates[i]; 3012 } 3013 } 3014 3015 BUG(); 3016 return NULL; 3017 } 3018 3019 /* 3020 * Unregister hstate attributes from a single node device. 3021 * No-op if no hstate attributes attached. 
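 * (These per-node attributes appear under
 *  /sys/devices/system/node/nodeN/hugepages/hugepages-<size>kB/,
 *  mirroring the global /sys/kernel/mm/hugepages/ hierarchy.)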
3022 */ 3023 static void hugetlb_unregister_node(struct node *node) 3024 { 3025 struct hstate *h; 3026 struct node_hstate *nhs = &node_hstates[node->dev.id]; 3027 3028 if (!nhs->hugepages_kobj) 3029 return; /* no hstate attributes */ 3030 3031 for_each_hstate(h) { 3032 int idx = hstate_index(h); 3033 if (nhs->hstate_kobjs[idx]) { 3034 kobject_put(nhs->hstate_kobjs[idx]); 3035 nhs->hstate_kobjs[idx] = NULL; 3036 } 3037 } 3038 3039 kobject_put(nhs->hugepages_kobj); 3040 nhs->hugepages_kobj = NULL; 3041 } 3042 3043 3044 /* 3045 * Register hstate attributes for a single node device. 3046 * No-op if attributes already registered. 3047 */ 3048 static void hugetlb_register_node(struct node *node) 3049 { 3050 struct hstate *h; 3051 struct node_hstate *nhs = &node_hstates[node->dev.id]; 3052 int err; 3053 3054 if (nhs->hugepages_kobj) 3055 return; /* already allocated */ 3056 3057 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 3058 &node->dev.kobj); 3059 if (!nhs->hugepages_kobj) 3060 return; 3061 3062 for_each_hstate(h) { 3063 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 3064 nhs->hstate_kobjs, 3065 &per_node_hstate_attr_group); 3066 if (err) { 3067 pr_err("HugeTLB: Unable to add hstate %s for node %d\n", 3068 h->name, node->dev.id); 3069 hugetlb_unregister_node(node); 3070 break; 3071 } 3072 } 3073 } 3074 3075 /* 3076 * hugetlb init time: register hstate attributes for all registered node 3077 * devices of nodes that have memory. All on-line nodes should have 3078 * registered their associated device by this time. 3079 */ 3080 static void __init hugetlb_register_all_nodes(void) 3081 { 3082 int nid; 3083 3084 for_each_node_state(nid, N_MEMORY) { 3085 struct node *node = node_devices[nid]; 3086 if (node->dev.id == nid) 3087 hugetlb_register_node(node); 3088 } 3089 3090 /* 3091 * Let the node device driver know we're here so it can 3092 * [un]register hstate attributes on node hotplug. 3093 */ 3094 register_hugetlbfs_with_node(hugetlb_register_node, 3095 hugetlb_unregister_node); 3096 } 3097 #else /* !CONFIG_NUMA */ 3098 3099 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 3100 { 3101 BUG(); 3102 if (nidp) 3103 *nidp = -1; 3104 return NULL; 3105 } 3106 3107 static void hugetlb_register_all_nodes(void) { } 3108 3109 #endif 3110 3111 static int __init hugetlb_init(void) 3112 { 3113 int i; 3114 3115 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE < 3116 __NR_HPAGEFLAGS); 3117 3118 if (!hugepages_supported()) { 3119 if (hugetlb_max_hstate || default_hstate_max_huge_pages) 3120 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); 3121 return 0; 3122 } 3123 3124 /* 3125 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some 3126 * architectures depend on setup being done here. 3127 */ 3128 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 3129 if (!parsed_default_hugepagesz) { 3130 /* 3131 * If we did not parse a default huge page size, set 3132 * default_hstate_idx to HPAGE_SIZE hstate. And, if the 3133 * number of huge pages for this default size was implicitly 3134 * specified, set that here as well. 3135 * Note that the implicit setting will overwrite an explicit 3136 * setting. A warning will be printed in this case. 
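		 *
		 * (Illustrative example: on x86 with a 2M default huge page
		 *  size, booting with "hugepages=4 hugepagesz=2M hugepages=8"
		 *  reaches this point with an implicit count of 4 and an
		 *  explicit count of 8; the implicit 4 wins and the warnings
		 *  below are printed.)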
		 */
		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
		if (default_hstate_max_huge_pages) {
			if (default_hstate.max_huge_pages) {
				char buf[32];

				string_get_size(huge_page_size(&default_hstate),
					1, STRING_UNITS_2, buf, 32);
				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
					default_hstate.max_huge_pages, buf);
				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
					default_hstate_max_huge_pages);
			}
			default_hstate.max_huge_pages =
				default_hstate_max_huge_pages;
		}
	}

	hugetlb_cma_check();
	hugetlb_init_hstates();
	gather_bootmem_prealloc();
	report_hugepages();

	hugetlb_sysfs_init();
	hugetlb_register_all_nodes();
	hugetlb_cgroup_file_init();

#ifdef CONFIG_SMP
	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
#else
	num_fault_mutexes = 1;
#endif
	hugetlb_fault_mutex_table =
		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
			      GFP_KERNEL);
	BUG_ON(!hugetlb_fault_mutex_table);

	for (i = 0; i < num_fault_mutexes; i++)
		mutex_init(&hugetlb_fault_mutex_table[i]);
	return 0;
}
subsys_initcall(hugetlb_init);

/* Overwritten by architectures with more huge page sizes */
bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
{
	return size == HPAGE_SIZE;
}

void __init hugetlb_add_hstate(unsigned int order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		return;
	}
	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[hugetlb_max_hstate++];
	h->order = order;
	h->mask = ~(huge_page_size(h) - 1);
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	INIT_LIST_HEAD(&h->hugepage_activelist);
	h->next_nid_to_alloc = first_memory_node;
	h->next_nid_to_free = first_memory_node;
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/1024);

	parsed_hstate = h;
}

/*
 * hugepages command line processing
 * hugepages normally follows a valid hugepagesz or default_hugepagesz
 * specification.  If not, ignore the hugepages value.  hugepages can also
 * be the first huge page command line option in which case it implicitly
 * specifies the number of huge pages for the default size.
 */
static int __init hugepages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;

	if (!parsed_valid_hugepagesz) {
		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
		parsed_valid_hugepagesz = true;
		return 0;
	}

	/*
	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
	 * yet, so this hugepages= parameter goes to the "default hstate".
	 * Otherwise, it goes with the previously parsed hugepagesz or
	 * default_hugepagesz.
	 */
	else if (!hugetlb_max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
		return 0;
	}

	if (sscanf(s, "%lu", mhp) <= 0)
		*mhp = 0;

	/*
	 * Global state is always initialized later in hugetlb_init.
3249 * But we need to allocate >= MAX_ORDER hstates here early to still 3250 * use the bootmem allocator. 3251 */ 3252 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER) 3253 hugetlb_hstate_alloc_pages(parsed_hstate); 3254 3255 last_mhp = mhp; 3256 3257 return 1; 3258 } 3259 __setup("hugepages=", hugepages_setup); 3260 3261 /* 3262 * hugepagesz command line processing 3263 * A specific huge page size can only be specified once with hugepagesz. 3264 * hugepagesz is followed by hugepages on the command line. The global 3265 * variable 'parsed_valid_hugepagesz' is used to determine if prior 3266 * hugepagesz argument was valid. 3267 */ 3268 static int __init hugepagesz_setup(char *s) 3269 { 3270 unsigned long size; 3271 struct hstate *h; 3272 3273 parsed_valid_hugepagesz = false; 3274 size = (unsigned long)memparse(s, NULL); 3275 3276 if (!arch_hugetlb_valid_size(size)) { 3277 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); 3278 return 0; 3279 } 3280 3281 h = size_to_hstate(size); 3282 if (h) { 3283 /* 3284 * hstate for this size already exists. This is normally 3285 * an error, but is allowed if the existing hstate is the 3286 * default hstate. More specifically, it is only allowed if 3287 * the number of huge pages for the default hstate was not 3288 * previously specified. 3289 */ 3290 if (!parsed_default_hugepagesz || h != &default_hstate || 3291 default_hstate.max_huge_pages) { 3292 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); 3293 return 0; 3294 } 3295 3296 /* 3297 * No need to call hugetlb_add_hstate() as hstate already 3298 * exists. But, do set parsed_hstate so that a following 3299 * hugepages= parameter will be applied to this hstate. 3300 */ 3301 parsed_hstate = h; 3302 parsed_valid_hugepagesz = true; 3303 return 1; 3304 } 3305 3306 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 3307 parsed_valid_hugepagesz = true; 3308 return 1; 3309 } 3310 __setup("hugepagesz=", hugepagesz_setup); 3311 3312 /* 3313 * default_hugepagesz command line input 3314 * Only one instance of default_hugepagesz allowed on command line. 3315 */ 3316 static int __init default_hugepagesz_setup(char *s) 3317 { 3318 unsigned long size; 3319 3320 parsed_valid_hugepagesz = false; 3321 if (parsed_default_hugepagesz) { 3322 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); 3323 return 0; 3324 } 3325 3326 size = (unsigned long)memparse(s, NULL); 3327 3328 if (!arch_hugetlb_valid_size(size)) { 3329 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); 3330 return 0; 3331 } 3332 3333 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 3334 parsed_valid_hugepagesz = true; 3335 parsed_default_hugepagesz = true; 3336 default_hstate_idx = hstate_index(size_to_hstate(size)); 3337 3338 /* 3339 * The number of default huge pages (for this size) could have been 3340 * specified as the first hugetlb parameter: hugepages=X. If so, 3341 * then default_hstate_max_huge_pages is set. If the default huge 3342 * page size is gigantic (>= MAX_ORDER), then the pages must be 3343 * allocated here from bootmem allocator. 
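	 *
	 * (Illustrative example: booting with "hugepages=2
	 *  default_hugepagesz=1G" takes this path: the leading hugepages=
	 *  parameter set default_hstate_max_huge_pages to 2, and since the
	 *  1G default hstate is gigantic, both pages are allocated from
	 *  bootmem right here.)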
3344 */ 3345 if (default_hstate_max_huge_pages) { 3346 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 3347 if (hstate_is_gigantic(&default_hstate)) 3348 hugetlb_hstate_alloc_pages(&default_hstate); 3349 default_hstate_max_huge_pages = 0; 3350 } 3351 3352 return 1; 3353 } 3354 __setup("default_hugepagesz=", default_hugepagesz_setup); 3355 3356 static unsigned int allowed_mems_nr(struct hstate *h) 3357 { 3358 int node; 3359 unsigned int nr = 0; 3360 nodemask_t *mpol_allowed; 3361 unsigned int *array = h->free_huge_pages_node; 3362 gfp_t gfp_mask = htlb_alloc_mask(h); 3363 3364 mpol_allowed = policy_nodemask_current(gfp_mask); 3365 3366 for_each_node_mask(node, cpuset_current_mems_allowed) { 3367 if (!mpol_allowed || node_isset(node, *mpol_allowed)) 3368 nr += array[node]; 3369 } 3370 3371 return nr; 3372 } 3373 3374 #ifdef CONFIG_SYSCTL 3375 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, 3376 void *buffer, size_t *length, 3377 loff_t *ppos, unsigned long *out) 3378 { 3379 struct ctl_table dup_table; 3380 3381 /* 3382 * In order to avoid races with __do_proc_doulongvec_minmax(), we 3383 * can duplicate the @table and alter the duplicate of it. 3384 */ 3385 dup_table = *table; 3386 dup_table.data = out; 3387 3388 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); 3389 } 3390 3391 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 3392 struct ctl_table *table, int write, 3393 void *buffer, size_t *length, loff_t *ppos) 3394 { 3395 struct hstate *h = &default_hstate; 3396 unsigned long tmp = h->max_huge_pages; 3397 int ret; 3398 3399 if (!hugepages_supported()) 3400 return -EOPNOTSUPP; 3401 3402 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 3403 &tmp); 3404 if (ret) 3405 goto out; 3406 3407 if (write) 3408 ret = __nr_hugepages_store_common(obey_mempolicy, h, 3409 NUMA_NO_NODE, tmp, *length); 3410 out: 3411 return ret; 3412 } 3413 3414 int hugetlb_sysctl_handler(struct ctl_table *table, int write, 3415 void *buffer, size_t *length, loff_t *ppos) 3416 { 3417 3418 return hugetlb_sysctl_handler_common(false, table, write, 3419 buffer, length, ppos); 3420 } 3421 3422 #ifdef CONFIG_NUMA 3423 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 3424 void *buffer, size_t *length, loff_t *ppos) 3425 { 3426 return hugetlb_sysctl_handler_common(true, table, write, 3427 buffer, length, ppos); 3428 } 3429 #endif /* CONFIG_NUMA */ 3430 3431 int hugetlb_overcommit_handler(struct ctl_table *table, int write, 3432 void *buffer, size_t *length, loff_t *ppos) 3433 { 3434 struct hstate *h = &default_hstate; 3435 unsigned long tmp; 3436 int ret; 3437 3438 if (!hugepages_supported()) 3439 return -EOPNOTSUPP; 3440 3441 tmp = h->nr_overcommit_huge_pages; 3442 3443 if (write && hstate_is_gigantic(h)) 3444 return -EINVAL; 3445 3446 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 3447 &tmp); 3448 if (ret) 3449 goto out; 3450 3451 if (write) { 3452 spin_lock(&hugetlb_lock); 3453 h->nr_overcommit_huge_pages = tmp; 3454 spin_unlock(&hugetlb_lock); 3455 } 3456 out: 3457 return ret; 3458 } 3459 3460 #endif /* CONFIG_SYSCTL */ 3461 3462 void hugetlb_report_meminfo(struct seq_file *m) 3463 { 3464 struct hstate *h; 3465 unsigned long total = 0; 3466 3467 if (!hugepages_supported()) 3468 return; 3469 3470 for_each_hstate(h) { 3471 unsigned long count = h->nr_huge_pages; 3472 3473 total += huge_page_size(h) * count; 3474 3475 if (h == &default_hstate) 3476 seq_printf(m, 3477 
"HugePages_Total: %5lu\n" 3478 "HugePages_Free: %5lu\n" 3479 "HugePages_Rsvd: %5lu\n" 3480 "HugePages_Surp: %5lu\n" 3481 "Hugepagesize: %8lu kB\n", 3482 count, 3483 h->free_huge_pages, 3484 h->resv_huge_pages, 3485 h->surplus_huge_pages, 3486 huge_page_size(h) / SZ_1K); 3487 } 3488 3489 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K); 3490 } 3491 3492 int hugetlb_report_node_meminfo(char *buf, int len, int nid) 3493 { 3494 struct hstate *h = &default_hstate; 3495 3496 if (!hugepages_supported()) 3497 return 0; 3498 3499 return sysfs_emit_at(buf, len, 3500 "Node %d HugePages_Total: %5u\n" 3501 "Node %d HugePages_Free: %5u\n" 3502 "Node %d HugePages_Surp: %5u\n", 3503 nid, h->nr_huge_pages_node[nid], 3504 nid, h->free_huge_pages_node[nid], 3505 nid, h->surplus_huge_pages_node[nid]); 3506 } 3507 3508 void hugetlb_show_meminfo(void) 3509 { 3510 struct hstate *h; 3511 int nid; 3512 3513 if (!hugepages_supported()) 3514 return; 3515 3516 for_each_node_state(nid, N_MEMORY) 3517 for_each_hstate(h) 3518 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 3519 nid, 3520 h->nr_huge_pages_node[nid], 3521 h->free_huge_pages_node[nid], 3522 h->surplus_huge_pages_node[nid], 3523 huge_page_size(h) / SZ_1K); 3524 } 3525 3526 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm) 3527 { 3528 seq_printf(m, "HugetlbPages:\t%8lu kB\n", 3529 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10)); 3530 } 3531 3532 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ 3533 unsigned long hugetlb_total_pages(void) 3534 { 3535 struct hstate *h; 3536 unsigned long nr_total_pages = 0; 3537 3538 for_each_hstate(h) 3539 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); 3540 return nr_total_pages; 3541 } 3542 3543 static int hugetlb_acct_memory(struct hstate *h, long delta) 3544 { 3545 int ret = -ENOMEM; 3546 3547 if (!delta) 3548 return 0; 3549 3550 spin_lock(&hugetlb_lock); 3551 /* 3552 * When cpuset is configured, it breaks the strict hugetlb page 3553 * reservation as the accounting is done on a global variable. Such 3554 * reservation is completely rubbish in the presence of cpuset because 3555 * the reservation is not checked against page availability for the 3556 * current cpuset. Application can still potentially OOM'ed by kernel 3557 * with lack of free htlb page in cpuset that the task is in. 3558 * Attempt to enforce strict accounting with cpuset is almost 3559 * impossible (or too ugly) because cpuset is too fluid that 3560 * task or memory node can be dynamically moved between cpusets. 3561 * 3562 * The change of semantics for shared hugetlb mapping with cpuset is 3563 * undesirable. However, in order to preserve some of the semantics, 3564 * we fall back to check against current free page availability as 3565 * a best attempt and hopefully to minimize the impact of changing 3566 * semantics that cpuset has. 3567 * 3568 * Apart from cpuset, we also have memory policy mechanism that 3569 * also determines from which node the kernel will allocate memory 3570 * in a NUMA system. So similar to cpuset, we also should consider 3571 * the memory policy of the current task. Similar to the description 3572 * above. 
	 */
	if (delta > 0) {
		if (gather_surplus_pages(h, delta) < 0)
			goto out;

		if (delta > allowed_mems_nr(h)) {
			return_unused_surplus_pages(h, delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(h, (unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
	struct resv_map *resv = vma_resv_map(vma);

	/*
	 * This new VMA should share its sibling's reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		kref_get(&resv->refs);
}

static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	struct resv_map *resv = vma_resv_map(vma);
	struct hugepage_subpool *spool = subpool_vma(vma);
	unsigned long reserve, start, end;
	long gbl_reserve;

	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return;

	start = vma_hugecache_offset(h, vma, vma->vm_start);
	end = vma_hugecache_offset(h, vma, vma->vm_end);

	reserve = (end - start) - region_count(resv, start, end);
	hugetlb_cgroup_uncharge_counter(resv, start, end);
	if (reserve) {
		/*
		 * Decrement reserve counts.  The global reserve count may be
		 * adjusted if the subpool has a minimum size.
		 */
		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
		hugetlb_acct_memory(h, -gbl_reserve);
	}

	kref_put(&resv->refs, resv_map_release);
}

static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
{
	if (addr & ~(huge_page_mask(hstate_vma(vma))))
		return -EINVAL;
	return 0;
}

static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
{
	return huge_page_size(hstate_vma(vma));
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
{
	BUG();
	return 0;
}

/*
 * When a new function is introduced to vm_operations_struct and added
 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
 * This is because under the System V memory model, mappings created via
 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
 * but their original vm_ops are overwritten with shm_vm_ops.
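 * (The shm_vm_ops wrappers live in ipc/shm.c and forward to the ops
 *  below for hugetlb-backed segments.)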
3667 */ 3668 const struct vm_operations_struct hugetlb_vm_ops = { 3669 .fault = hugetlb_vm_op_fault, 3670 .open = hugetlb_vm_op_open, 3671 .close = hugetlb_vm_op_close, 3672 .may_split = hugetlb_vm_op_split, 3673 .pagesize = hugetlb_vm_op_pagesize, 3674 }; 3675 3676 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 3677 int writable) 3678 { 3679 pte_t entry; 3680 3681 if (writable) { 3682 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 3683 vma->vm_page_prot))); 3684 } else { 3685 entry = huge_pte_wrprotect(mk_huge_pte(page, 3686 vma->vm_page_prot)); 3687 } 3688 entry = pte_mkyoung(entry); 3689 entry = pte_mkhuge(entry); 3690 entry = arch_make_huge_pte(entry, vma, page, writable); 3691 3692 return entry; 3693 } 3694 3695 static void set_huge_ptep_writable(struct vm_area_struct *vma, 3696 unsigned long address, pte_t *ptep) 3697 { 3698 pte_t entry; 3699 3700 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); 3701 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 3702 update_mmu_cache(vma, address, ptep); 3703 } 3704 3705 bool is_hugetlb_entry_migration(pte_t pte) 3706 { 3707 swp_entry_t swp; 3708 3709 if (huge_pte_none(pte) || pte_present(pte)) 3710 return false; 3711 swp = pte_to_swp_entry(pte); 3712 if (is_migration_entry(swp)) 3713 return true; 3714 else 3715 return false; 3716 } 3717 3718 static bool is_hugetlb_entry_hwpoisoned(pte_t pte) 3719 { 3720 swp_entry_t swp; 3721 3722 if (huge_pte_none(pte) || pte_present(pte)) 3723 return false; 3724 swp = pte_to_swp_entry(pte); 3725 if (is_hwpoison_entry(swp)) 3726 return true; 3727 else 3728 return false; 3729 } 3730 3731 static void 3732 hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, 3733 struct page *new_page) 3734 { 3735 __SetPageUptodate(new_page); 3736 set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1)); 3737 hugepage_add_new_anon_rmap(new_page, vma, addr); 3738 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); 3739 ClearHPageRestoreReserve(new_page); 3740 SetHPageMigratable(new_page); 3741 } 3742 3743 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 3744 struct vm_area_struct *vma) 3745 { 3746 pte_t *src_pte, *dst_pte, entry, dst_entry; 3747 struct page *ptepage; 3748 unsigned long addr; 3749 bool cow = is_cow_mapping(vma->vm_flags); 3750 struct hstate *h = hstate_vma(vma); 3751 unsigned long sz = huge_page_size(h); 3752 unsigned long npages = pages_per_huge_page(h); 3753 struct address_space *mapping = vma->vm_file->f_mapping; 3754 struct mmu_notifier_range range; 3755 int ret = 0; 3756 3757 if (cow) { 3758 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src, 3759 vma->vm_start, 3760 vma->vm_end); 3761 mmu_notifier_invalidate_range_start(&range); 3762 } else { 3763 /* 3764 * For shared mappings i_mmap_rwsem must be held to call 3765 * huge_pte_alloc, otherwise the returned ptep could go 3766 * away if part of a shared pmd and another thread calls 3767 * huge_pmd_unshare. 3768 */ 3769 i_mmap_lock_read(mapping); 3770 } 3771 3772 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 3773 spinlock_t *src_ptl, *dst_ptl; 3774 src_pte = huge_pte_offset(src, addr, sz); 3775 if (!src_pte) 3776 continue; 3777 dst_pte = huge_pte_alloc(dst, addr, sz); 3778 if (!dst_pte) { 3779 ret = -ENOMEM; 3780 break; 3781 } 3782 3783 /* 3784 * If the pagetables are shared don't copy or take references. 3785 * dst_pte == src_pte is the common case of src/dest sharing. 
3786 * 3787 * However, src could have 'unshared' and dst shares with 3788 * another vma. If dst_pte !none, this implies sharing. 3789 * Check here before taking page table lock, and once again 3790 * after taking the lock below. 3791 */ 3792 dst_entry = huge_ptep_get(dst_pte); 3793 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry)) 3794 continue; 3795 3796 dst_ptl = huge_pte_lock(h, dst, dst_pte); 3797 src_ptl = huge_pte_lockptr(h, src, src_pte); 3798 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 3799 entry = huge_ptep_get(src_pte); 3800 dst_entry = huge_ptep_get(dst_pte); 3801 again: 3802 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) { 3803 /* 3804 * Skip if src entry none. Also, skip in the 3805 * unlikely case dst entry !none as this implies 3806 * sharing with another vma. 3807 */ 3808 ; 3809 } else if (unlikely(is_hugetlb_entry_migration(entry) || 3810 is_hugetlb_entry_hwpoisoned(entry))) { 3811 swp_entry_t swp_entry = pte_to_swp_entry(entry); 3812 3813 if (is_write_migration_entry(swp_entry) && cow) { 3814 /* 3815 * COW mappings require pages in both 3816 * parent and child to be set to read. 3817 */ 3818 make_migration_entry_read(&swp_entry); 3819 entry = swp_entry_to_pte(swp_entry); 3820 set_huge_swap_pte_at(src, addr, src_pte, 3821 entry, sz); 3822 } 3823 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz); 3824 } else { 3825 entry = huge_ptep_get(src_pte); 3826 ptepage = pte_page(entry); 3827 get_page(ptepage); 3828 3829 /* 3830 * This is a rare case where we see pinned hugetlb 3831 * pages while they're prone to COW. We need to do the 3832 * COW earlier during fork. 3833 * 3834 * When pre-allocating the page or copying data, we 3835 * need to be without the pgtable locks since we could 3836 * sleep during the process. 3837 */ 3838 if (unlikely(page_needs_cow_for_dma(vma, ptepage))) { 3839 pte_t src_pte_old = entry; 3840 struct page *new; 3841 3842 spin_unlock(src_ptl); 3843 spin_unlock(dst_ptl); 3844 /* Do not use reserve as it's private owned */ 3845 new = alloc_huge_page(vma, addr, 1); 3846 if (IS_ERR(new)) { 3847 put_page(ptepage); 3848 ret = PTR_ERR(new); 3849 break; 3850 } 3851 copy_user_huge_page(new, ptepage, addr, vma, 3852 npages); 3853 put_page(ptepage); 3854 3855 /* Install the new huge page if src pte stable */ 3856 dst_ptl = huge_pte_lock(h, dst, dst_pte); 3857 src_ptl = huge_pte_lockptr(h, src, src_pte); 3858 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 3859 entry = huge_ptep_get(src_pte); 3860 if (!pte_same(src_pte_old, entry)) { 3861 put_page(new); 3862 /* dst_entry won't change as in child */ 3863 goto again; 3864 } 3865 hugetlb_install_page(vma, dst_pte, addr, new); 3866 spin_unlock(src_ptl); 3867 spin_unlock(dst_ptl); 3868 continue; 3869 } 3870 3871 if (cow) { 3872 /* 3873 * No need to notify as we are downgrading page 3874 * table protection not changing it to point 3875 * to a new page. 
3876 * 3877 * See Documentation/vm/mmu_notifier.rst 3878 */ 3879 huge_ptep_set_wrprotect(src, addr, src_pte); 3880 } 3881 3882 page_dup_rmap(ptepage, true); 3883 set_huge_pte_at(dst, addr, dst_pte, entry); 3884 hugetlb_count_add(npages, dst); 3885 } 3886 spin_unlock(src_ptl); 3887 spin_unlock(dst_ptl); 3888 } 3889 3890 if (cow) 3891 mmu_notifier_invalidate_range_end(&range); 3892 else 3893 i_mmap_unlock_read(mapping); 3894 3895 return ret; 3896 } 3897 3898 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 3899 unsigned long start, unsigned long end, 3900 struct page *ref_page) 3901 { 3902 struct mm_struct *mm = vma->vm_mm; 3903 unsigned long address; 3904 pte_t *ptep; 3905 pte_t pte; 3906 spinlock_t *ptl; 3907 struct page *page; 3908 struct hstate *h = hstate_vma(vma); 3909 unsigned long sz = huge_page_size(h); 3910 struct mmu_notifier_range range; 3911 3912 WARN_ON(!is_vm_hugetlb_page(vma)); 3913 BUG_ON(start & ~huge_page_mask(h)); 3914 BUG_ON(end & ~huge_page_mask(h)); 3915 3916 /* 3917 * This is a hugetlb vma, all the pte entries should point 3918 * to huge page. 3919 */ 3920 tlb_change_page_size(tlb, sz); 3921 tlb_start_vma(tlb, vma); 3922 3923 /* 3924 * If sharing possible, alert mmu notifiers of worst case. 3925 */ 3926 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start, 3927 end); 3928 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 3929 mmu_notifier_invalidate_range_start(&range); 3930 address = start; 3931 for (; address < end; address += sz) { 3932 ptep = huge_pte_offset(mm, address, sz); 3933 if (!ptep) 3934 continue; 3935 3936 ptl = huge_pte_lock(h, mm, ptep); 3937 if (huge_pmd_unshare(mm, vma, &address, ptep)) { 3938 spin_unlock(ptl); 3939 /* 3940 * We just unmapped a page of PMDs by clearing a PUD. 3941 * The caller's TLB flush range should cover this area. 3942 */ 3943 continue; 3944 } 3945 3946 pte = huge_ptep_get(ptep); 3947 if (huge_pte_none(pte)) { 3948 spin_unlock(ptl); 3949 continue; 3950 } 3951 3952 /* 3953 * Migrating hugepage or HWPoisoned hugepage is already 3954 * unmapped and its refcount is dropped, so just clear pte here. 3955 */ 3956 if (unlikely(!pte_present(pte))) { 3957 huge_pte_clear(mm, address, ptep, sz); 3958 spin_unlock(ptl); 3959 continue; 3960 } 3961 3962 page = pte_page(pte); 3963 /* 3964 * If a reference page is supplied, it is because a specific 3965 * page is being unmapped, not a range. Ensure the page we 3966 * are about to unmap is the actual page of interest. 
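		 * (See unmap_ref_private() below for a caller that passes a
		 * specific ref_page in order to evict a single page from the
		 * other private mappings of the same offset.)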
3967 */ 3968 if (ref_page) { 3969 if (page != ref_page) { 3970 spin_unlock(ptl); 3971 continue; 3972 } 3973 /* 3974 * Mark the VMA as having unmapped its page so that 3975 * future faults in this VMA will fail rather than 3976 * looking like data was lost 3977 */ 3978 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 3979 } 3980 3981 pte = huge_ptep_get_and_clear(mm, address, ptep); 3982 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); 3983 if (huge_pte_dirty(pte)) 3984 set_page_dirty(page); 3985 3986 hugetlb_count_sub(pages_per_huge_page(h), mm); 3987 page_remove_rmap(page, true); 3988 3989 spin_unlock(ptl); 3990 tlb_remove_page_size(tlb, page, huge_page_size(h)); 3991 /* 3992 * Bail out after unmapping reference page if supplied 3993 */ 3994 if (ref_page) 3995 break; 3996 } 3997 mmu_notifier_invalidate_range_end(&range); 3998 tlb_end_vma(tlb, vma); 3999 } 4000 4001 void __unmap_hugepage_range_final(struct mmu_gather *tlb, 4002 struct vm_area_struct *vma, unsigned long start, 4003 unsigned long end, struct page *ref_page) 4004 { 4005 __unmap_hugepage_range(tlb, vma, start, end, ref_page); 4006 4007 /* 4008 * Clear this flag so that x86's huge_pmd_share page_table_shareable 4009 * test will fail on a vma being torn down, and not grab a page table 4010 * on its way out. We're lucky that the flag has such an appropriate 4011 * name, and can in fact be safely cleared here. We could clear it 4012 * before the __unmap_hugepage_range above, but all that's necessary 4013 * is to clear it before releasing the i_mmap_rwsem. This works 4014 * because in the context this is called, the VMA is about to be 4015 * destroyed and the i_mmap_rwsem is held. 4016 */ 4017 vma->vm_flags &= ~VM_MAYSHARE; 4018 } 4019 4020 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 4021 unsigned long end, struct page *ref_page) 4022 { 4023 struct mmu_gather tlb; 4024 4025 tlb_gather_mmu(&tlb, vma->vm_mm); 4026 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); 4027 tlb_finish_mmu(&tlb); 4028 } 4029 4030 /* 4031 * This is called when the original mapper is failing to COW a MAP_PRIVATE 4032 * mapping it owns the reserve page for. The intention is to unmap the page 4033 * from other VMAs and let the children be SIGKILLed if they are faulting the 4034 * same region. 4035 */ 4036 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 4037 struct page *page, unsigned long address) 4038 { 4039 struct hstate *h = hstate_vma(vma); 4040 struct vm_area_struct *iter_vma; 4041 struct address_space *mapping; 4042 pgoff_t pgoff; 4043 4044 /* 4045 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 4046 * from page cache lookup which is in HPAGE_SIZE units. 4047 */ 4048 address = address & huge_page_mask(h); 4049 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 4050 vma->vm_pgoff; 4051 mapping = vma->vm_file->f_mapping; 4052 4053 /* 4054 * Take the mapping lock for the duration of the table walk. As 4055 * this mapping should be shared between all the VMAs, 4056 * __unmap_hugepage_range() is called as the lock is already held 4057 */ 4058 i_mmap_lock_write(mapping); 4059 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 4060 /* Do not unmap the current VMA */ 4061 if (iter_vma == vma) 4062 continue; 4063 4064 /* 4065 * Shared VMAs have their own reserves and do not affect 4066 * MAP_PRIVATE accounting but it is possible that a shared 4067 * VMA is using the same page so check and skip such VMAs. 
		 */
		if (iter_vma->vm_flags & VM_MAYSHARE)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption.
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			unmap_hugepage_range(iter_vma, address,
					     address + huge_page_size(h), page);
	}
	i_mmap_unlock_write(mapping);
}

/*
 * hugetlb_cow() should be called with the page lock of the original hugepage
 * held. Called with hugetlb_instantiation_mutex held and pte_page locked so
 * we cannot race with other handlers or page migration.
 * Keep the pte_same checks anyway to make the transition from the mutex
 * easier.
 */
static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
		       unsigned long address, pte_t *ptep,
		       struct page *pagecache_page, spinlock_t *ptl)
{
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	struct page *old_page, *new_page;
	int outside_reserve = 0;
	vm_fault_t ret = 0;
	unsigned long haddr = address & huge_page_mask(h);
	struct mmu_notifier_range range;

	pte = huge_ptep_get(ptep);
	old_page = pte_page(pte);

retry_avoidcopy:
	/*
	 * If no-one else is actually using this page, avoid the copy
	 * and just make the page writable.
	 */
	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
		page_move_anon_rmap(old_page, vma);
		set_huge_ptep_writable(vma, haddr, ptep);
		return 0;
	}

	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_page != pagecache_page)
		outside_reserve = 1;

	get_page(old_page);

	/*
	 * Drop the page table lock as the buddy allocator may be called. It
	 * will be acquired again before returning to the caller, as expected.
	 */
	spin_unlock(ptl);
	new_page = alloc_huge_page(vma, haddr, outside_reserve);

	if (IS_ERR(new_page)) {
		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mapper's
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
			struct address_space *mapping = vma->vm_file->f_mapping;
			pgoff_t idx;
			u32 hash;

			put_page(old_page);
			BUG_ON(huge_pte_none(pte));
			/*
			 * Drop hugetlb_fault_mutex and i_mmap_rwsem before
			 * unmapping. unmapping needs to hold i_mmap_rwsem
			 * in write mode. Dropping i_mmap_rwsem in read mode
			 * here is OK as COW mappings do not interact with
			 * PMD sharing.
			 *
			 * Reacquire both after the unmap operation.
			 */
			idx = vma_hugecache_offset(h, vma, haddr);
			hash = hugetlb_fault_mutex_hash(mapping, idx);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);

			unmap_ref_private(mm, vma, old_page, haddr);

			i_mmap_lock_read(mapping);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);
			spin_lock(ptl);
			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
			if (likely(ptep &&
				   pte_same(huge_ptep_get(ptep), pte)))
				goto retry_avoidcopy;
			/*
			 * A race occurred while re-acquiring the page table
			 * lock; our job is done.
			 */
			return 0;
		}

		ret = vmf_error(PTR_ERR(new_page));
		goto out_release_old;
	}

	/*
	 * When the original hugepage is a shared one, it does not have
	 * anon_vma prepared.
	 */
	if (unlikely(anon_vma_prepare(vma))) {
		ret = VM_FAULT_OOM;
		goto out_release_all;
	}

	copy_user_huge_page(new_page, old_page, address, vma,
			    pages_per_huge_page(h));
	__SetPageUptodate(new_page);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
				haddr + huge_page_size(h));
	mmu_notifier_invalidate_range_start(&range);

	/*
	 * Retake the page table lock to check for racing updates
	 * before the page tables are altered.
	 */
	spin_lock(ptl);
	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
		ClearHPageRestoreReserve(new_page);

		/* Break COW */
		huge_ptep_clear_flush(vma, haddr, ptep);
		mmu_notifier_invalidate_range(mm, range.start, range.end);
		set_huge_pte_at(mm, haddr, ptep,
				make_huge_pte(vma, new_page, 1));
		page_remove_rmap(old_page, true);
		hugepage_add_new_anon_rmap(new_page, vma, haddr);
		SetHPageMigratable(new_page);
		/* Make the old page be freed below */
		new_page = old_page;
	}
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(&range);
out_release_all:
	restore_reserve_on_error(h, vma, haddr, new_page);
	put_page(new_page);
out_release_old:
	put_page(old_page);

	spin_lock(ptl);	/* Caller expects lock to be held */
	return ret;
}

/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	return find_lock_page(mapping, idx);
}

/*
 * Return whether there is a pagecache page to back given address within VMA.
 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;
	struct page *page;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	page = find_get_page(mapping, idx);
	if (page)
		put_page(page);
	return page != NULL;
}

int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			   pgoff_t idx)
{
	struct inode *inode = mapping->host;
	struct hstate *h = hstate_inode(inode);
	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);

	if (err)
		return err;
	ClearHPageRestoreReserve(page);

	/*
	 * Set page dirty so that it will not be removed from cache/file
	 * by non-hugetlbfs specific code paths.
	 */
	set_page_dirty(page);

	spin_lock(&inode->i_lock);
	inode->i_blocks += blocks_per_huge_page(h);
	spin_unlock(&inode->i_lock);
	return 0;
}

static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			struct address_space *mapping, pgoff_t idx,
			unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	vm_fault_t ret = VM_FAULT_SIGBUS;
	int anon_rmap = 0;
	unsigned long size;
	struct page *page;
	pte_t new_pte;
	spinlock_t *ptl;
	unsigned long haddr = address & huge_page_mask(h);
	bool new_page = false;

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW. Warn that such a situation has occurred as it may not be
	 * obvious.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
			   current->pid);
		return ret;
	}

	/*
	 * We cannot race with truncation due to holding i_mmap_rwsem.
	 * i_size is modified when holding i_mmap_rwsem, so check here
	 * once for faults beyond end of file.
	 */
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto out;

retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		/*
		 * Check for page in userfault range
		 */
		if (userfaultfd_missing(vma)) {
			u32 hash;
			struct vm_fault vmf = {
				.vma = vma,
				.address = haddr,
				.flags = flags,
				/*
				 * Hard to debug if it ends up being
				 * used by a callee that assumes
				 * something about the other
				 * uninitialized fields... same as in
				 * memory.c
				 */
			};

			/*
			 * hugetlb_fault_mutex and i_mmap_rwsem must be
			 * dropped before handling userfault. Reacquire
			 * after handling fault to make calling code simpler.
			 */
			hash = hugetlb_fault_mutex_hash(mapping, idx);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
			i_mmap_lock_read(mapping);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		page = alloc_huge_page(vma, haddr, 0);
		if (IS_ERR(page)) {
			/*
			 * Returning an error will result in the faulting
			 * task being sent SIGBUS. The hugetlb fault mutex
			 * prevents two tasks from racing to fault in the
			 * same page, which could result in false
			 * "unable to allocate" errors.
			 * Page migration does not take the fault mutex, but
			 * does a clear then write of pte's under page table
			 * lock. Page fault code could race with migration,
			 * notice the cleared pte and try to allocate a page
			 * here. Before returning an error, get ptl and make
			 * sure there really is no pte entry.
			 */
			ptl = huge_pte_lock(h, mm, ptep);
			if (!huge_pte_none(huge_ptep_get(ptep))) {
				ret = 0;
				spin_unlock(ptl);
				goto out;
			}
			spin_unlock(ptl);
			ret = vmf_error(PTR_ERR(page));
			goto out;
		}
		clear_huge_page(page, address, pages_per_huge_page(h));
		__SetPageUptodate(page);
		new_page = true;

		if (vma->vm_flags & VM_MAYSHARE) {
			int err = huge_add_to_page_cache(page, mapping, idx);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else {
			lock_page(page);
			if (unlikely(anon_vma_prepare(vma))) {
				ret = VM_FAULT_OOM;
				goto backout_unlocked;
			}
			anon_rmap = 1;
		}
	} else {
		/*
		 * If a memory error occurs between mmap() and fault, some
		 * processes don't have a hwpoisoned swap entry for the
		 * errored virtual address. So we need to block hugepage
		 * faults with the PG_hwpoison bit check.
		 */
		if (unlikely(PageHWPoison(page))) {
			ret = VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
			goto backout_unlocked;
		}
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		if (vma_needs_reservation(h, vma, haddr) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, haddr);
	}

	ptl = huge_pte_lock(h, mm, ptep);
	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	if (anon_rmap) {
		ClearHPageRestoreReserve(page);
		hugepage_add_new_anon_rmap(page, vma, haddr);
	} else
		page_dup_rmap(page, true);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, haddr, ptep, new_pte);

	hugetlb_count_add(pages_per_huge_page(h), mm);
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
	}

	spin_unlock(ptl);

	/*
	 * Only set HPageMigratable in newly allocated pages. Existing pages
	 * found in the pagecache may not have HPageMigratable set if they
	 * have been isolated for migration.
	 */
	if (new_page)
		SetHPageMigratable(page);

	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(ptl);
backout_unlocked:
	unlock_page(page);
	restore_reserve_on_error(h, vma, haddr, page);
	put_page(page);
	goto out;
}

#ifdef CONFIG_SMP
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
	unsigned long key[2];
	u32 hash;

	key[0] = (unsigned long) mapping;
	key[1] = idx;

	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);

	return hash & (num_fault_mutexes - 1);
}
#else
/*
 * For uniprocessor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
 */
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
	return 0;
}
#endif

vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags)
{
	pte_t *ptep, entry;
	spinlock_t *ptl;
	vm_fault_t ret;
	u32 hash;
	pgoff_t idx;
	struct page *page = NULL;
	struct page *pagecache_page = NULL;
	struct hstate *h = hstate_vma(vma);
	struct address_space *mapping;
	int need_wait_lock = 0;
	unsigned long haddr = address & huge_page_mask(h);

	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
	if (ptep) {
		/*
		 * Since we hold no locks, ptep could be stale. That is
		 * OK as we are only making decisions based on content and
		 * not actually modifying content here.
		 */
		entry = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_migration(entry))) {
			migration_entry_wait_huge(vma, mm, ptep);
			return 0;
		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
			return VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
	}

	/*
	 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
	 * until finished with ptep. This serves two purposes:
	 * 1) It prevents huge_pmd_unshare from being called elsewhere
	 *    and making the ptep no longer valid.
	 * 2) It synchronizes us with i_size modifications during truncation.
	 *
	 * ptep could have already been assigned via huge_pte_offset. That
	 * is OK, as huge_pte_alloc will return the same value unless
	 * something has changed.
	 */
	mapping = vma->vm_file->f_mapping;
	i_mmap_lock_read(mapping);
	ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
	if (!ptep) {
		i_mmap_unlock_read(mapping);
		return VM_FAULT_OOM;
	}

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	idx = vma_hugecache_offset(h, vma, haddr);
	hash = hugetlb_fault_mutex_hash(mapping, idx);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);

	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
		goto out_mutex;
	}

	ret = 0;

	/*
	 * entry could be a migration/hwpoison entry at this point, so this
	 * check prevents the kernel from proceeding below on the assumption
	 * that there is an active hugepage in the pagecache. This goto
	 * expects the 2nd page fault, where the
	 * is_hugetlb_entry_(migration|hwpoisoned) check will properly
	 * handle it.
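	 * (Both migration and memory failure replace a present pte with a
	 * swap-style entry while holding the page table lock, which we do
	 * not hold yet here, hence this !pte_present() recheck.)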
	 */
	if (!pte_present(entry))
		goto out_mutex;

	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. For private mappings, we also lookup the pagecache
	 * page now as it is used to determine if a reservation has been
	 * consumed.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
		if (vma_needs_reservation(h, vma, haddr) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, haddr);

		if (!(vma->vm_flags & VM_MAYSHARE))
			pagecache_page = hugetlbfs_pagecache_page(h,
								vma, haddr);
	}

	ptl = huge_pte_lock(h, mm, ptep);

	/* Check for a racing update before calling hugetlb_cow */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_ptl;

	/*
	 * hugetlb_cow() requires the page locks of pte_page(entry) and
	 * pagecache_page, so here we need to take the former one
	 * when page != pagecache_page or !pagecache_page.
	 */
	page = pte_page(entry);
	if (page != pagecache_page)
		if (!trylock_page(page)) {
			need_wait_lock = 1;
			goto out_ptl;
		}

	get_page(page);

	if (flags & FAULT_FLAG_WRITE) {
		if (!huge_pte_write(entry)) {
			ret = hugetlb_cow(mm, vma, address, ptep,
					  pagecache_page, ptl);
			goto out_put_page;
		}
		entry = huge_pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
						flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, haddr, ptep);
out_put_page:
	if (page != pagecache_page)
		unlock_page(page);
	put_page(page);
out_ptl:
	spin_unlock(ptl);

	if (pagecache_page) {
		unlock_page(pagecache_page);
		put_page(pagecache_page);
	}
out_mutex:
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	i_mmap_unlock_read(mapping);
	/*
	 * Generally it's safe to hold a refcount while waiting for the page
	 * lock. But here we only wait in order to defer the next page fault
	 * and avoid a busy loop; the page is not used after it is unlocked
	 * and before the current page fault returns. So we are safe from
	 * accessing a freed page, even though we wait here without taking a
	 * refcount.
	 */
	if (need_wait_lock)
		wait_on_page_locked(page);
	return ret;
}

/*
 * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with
 * modifications for huge pages.
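 *
 * For context, a minimal sketch of the userspace side that reaches this
 * path (illustrative only; the fields are from <linux/userfaultfd.h>):
 *
 *	struct uffdio_copy copy = {
 *		.dst = dst_addr,	/* huge page aligned */
 *		.src = (unsigned long) src_buf,
 *		.len = huge_page_size,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);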
 */
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pte_t *dst_pte,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep)
{
	struct address_space *mapping;
	pgoff_t idx;
	unsigned long size;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	struct hstate *h = hstate_vma(dst_vma);
	pte_t _dst_pte;
	spinlock_t *ptl;
	int ret;
	struct page *page;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_huge_page(dst_vma, dst_addr, 0);
		if (IS_ERR(page))
			goto out;

		ret = copy_huge_page_from_user(page,
						(const void __user *) src_addr,
						pages_per_huge_page(h), false);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	mapping = dst_vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, dst_vma, dst_addr);

	/*
	 * If shared, add to page cache
	 */
	if (vm_shared) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		ret = -EFAULT;
		if (idx >= size)
			goto out_release_nounlock;

		/*
		 * Serialization between remove_inode_hugepages() and
		 * huge_add_to_page_cache() below happens through the
		 * hugetlb_fault_mutex_table that here must be held by
		 * the caller.
		 */
		ret = huge_add_to_page_cache(page, mapping, idx);
		if (ret)
			goto out_release_nounlock;
	}

	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
	spin_lock(ptl);

	/*
	 * Recheck the i_size after holding PT lock to make sure not
	 * to leave any page mapped (as page_mapped()) beyond the end
	 * of the i_size (remove_inode_hugepages() is strict about
	 * enforcing that). If we bail out here, we'll also leave a
	 * page in the radix tree in the vm_shared case beyond the end
	 * of the i_size, but remove_inode_hugepages() will take care
	 * of it as soon as we drop the hugetlb_fault_mutex_table.
4741 */ 4742 size = i_size_read(mapping->host) >> huge_page_shift(h); 4743 ret = -EFAULT; 4744 if (idx >= size) 4745 goto out_release_unlock; 4746 4747 ret = -EEXIST; 4748 if (!huge_pte_none(huge_ptep_get(dst_pte))) 4749 goto out_release_unlock; 4750 4751 if (vm_shared) { 4752 page_dup_rmap(page, true); 4753 } else { 4754 ClearHPageRestoreReserve(page); 4755 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr); 4756 } 4757 4758 _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE); 4759 if (dst_vma->vm_flags & VM_WRITE) 4760 _dst_pte = huge_pte_mkdirty(_dst_pte); 4761 _dst_pte = pte_mkyoung(_dst_pte); 4762 4763 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 4764 4765 (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte, 4766 dst_vma->vm_flags & VM_WRITE); 4767 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 4768 4769 /* No need to invalidate - it was non-present before */ 4770 update_mmu_cache(dst_vma, dst_addr, dst_pte); 4771 4772 spin_unlock(ptl); 4773 SetHPageMigratable(page); 4774 if (vm_shared) 4775 unlock_page(page); 4776 ret = 0; 4777 out: 4778 return ret; 4779 out_release_unlock: 4780 spin_unlock(ptl); 4781 if (vm_shared) 4782 unlock_page(page); 4783 out_release_nounlock: 4784 put_page(page); 4785 goto out; 4786 } 4787 4788 static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma, 4789 int refs, struct page **pages, 4790 struct vm_area_struct **vmas) 4791 { 4792 int nr; 4793 4794 for (nr = 0; nr < refs; nr++) { 4795 if (likely(pages)) 4796 pages[nr] = mem_map_offset(page, nr); 4797 if (vmas) 4798 vmas[nr] = vma; 4799 } 4800 } 4801 4802 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 4803 struct page **pages, struct vm_area_struct **vmas, 4804 unsigned long *position, unsigned long *nr_pages, 4805 long i, unsigned int flags, int *locked) 4806 { 4807 unsigned long pfn_offset; 4808 unsigned long vaddr = *position; 4809 unsigned long remainder = *nr_pages; 4810 struct hstate *h = hstate_vma(vma); 4811 int err = -EFAULT, refs; 4812 4813 while (vaddr < vma->vm_end && remainder) { 4814 pte_t *pte; 4815 spinlock_t *ptl = NULL; 4816 int absent; 4817 struct page *page; 4818 4819 /* 4820 * If we have a pending SIGKILL, don't keep faulting pages and 4821 * potentially allocating memory. 4822 */ 4823 if (fatal_signal_pending(current)) { 4824 remainder = 0; 4825 break; 4826 } 4827 4828 /* 4829 * Some archs (sparc64, sh*) have multiple pte_ts to 4830 * each hugepage. We have to make sure we get the 4831 * first, for the page indexing below to work. 4832 * 4833 * Note that page table lock is not held when pte is null. 4834 */ 4835 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h), 4836 huge_page_size(h)); 4837 if (pte) 4838 ptl = huge_pte_lock(h, mm, pte); 4839 absent = !pte || huge_pte_none(huge_ptep_get(pte)); 4840 4841 /* 4842 * When coredumping, it suits get_dump_page if we just return 4843 * an error where there's an empty slot with no huge pagecache 4844 * to back it. This way, we avoid allocating a hugepage, and 4845 * the sparse dumpfile avoids allocating disk blocks, but its 4846 * huge holes still show up with zeroes where they need to be. 
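		 * (get_dump_page() is the caller that passes FOLL_DUMP; the
		 * error returned here makes it emit a hole in the dump file
		 * rather than fault in fresh huge pages.)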
		 */
		if (absent && (flags & FOLL_DUMP) &&
		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
			if (pte)
				spin_unlock(ptl);
			remainder = 0;
			break;
		}

		/*
		 * We need to call hugetlb_fault for both hugepages under
		 * migration (in which case hugetlb_fault waits for the
		 * migration,) and hwpoisoned hugepages (in which case we
		 * need to prevent the caller from accessing them.) To do
		 * this, we use is_swap_pte here instead of
		 * is_hugetlb_entry_migration and is_hugetlb_entry_hwpoisoned,
		 * because it simply covers both cases, and because we can't
		 * follow correct pages directly from any kind of swap entry.
		 */
		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
		    ((flags & FOLL_WRITE) &&
		      !huge_pte_write(huge_ptep_get(pte)))) {
			vm_fault_t ret;
			unsigned int fault_flags = 0;

			if (pte)
				spin_unlock(ptl);
			if (flags & FOLL_WRITE)
				fault_flags |= FAULT_FLAG_WRITE;
			if (locked)
				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
					FAULT_FLAG_KILLABLE;
			if (flags & FOLL_NOWAIT)
				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
					FAULT_FLAG_RETRY_NOWAIT;
			if (flags & FOLL_TRIED) {
				/*
				 * Note: FAULT_FLAG_ALLOW_RETRY and
				 * FAULT_FLAG_TRIED can co-exist
				 */
				fault_flags |= FAULT_FLAG_TRIED;
			}
			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
			if (ret & VM_FAULT_ERROR) {
				err = vm_fault_to_errno(ret, flags);
				remainder = 0;
				break;
			}
			if (ret & VM_FAULT_RETRY) {
				if (locked &&
				    !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
					*locked = 0;
				*nr_pages = 0;
				/*
				 * VM_FAULT_RETRY must not return an
				 * error, it will return zero
				 * instead.
				 *
				 * No need to update "position" as the
				 * caller will not check it after
				 * *nr_pages is set to 0.
				 */
				return i;
			}
			continue;
		}

		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
		page = pte_page(huge_ptep_get(pte));

		/*
		 * If subpage information is not requested, update counters
		 * and skip the same_page loop below.
		 */
		if (!pages && !vmas && !pfn_offset &&
		    (vaddr + huge_page_size(h) < vma->vm_end) &&
		    (remainder >= pages_per_huge_page(h))) {
			vaddr += huge_page_size(h);
			remainder -= pages_per_huge_page(h);
			i += pages_per_huge_page(h);
			spin_unlock(ptl);
			continue;
		}

		refs = min3(pages_per_huge_page(h) - pfn_offset,
			    (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);

		if (pages || vmas)
			record_subpages_vmas(mem_map_offset(page, pfn_offset),
					     vma, refs,
					     likely(pages) ? pages + i : NULL,
					     vmas ? vmas + i : NULL);

		if (pages) {
			/*
			 * try_grab_compound_head() should always succeed here,
			 * because: a) we hold the ptl lock, and b) we've just
			 * checked that the huge page is present in the page
			 * tables. If the huge page is present, then the tail
			 * pages must also be present. The ptl prevents the
			 * head page and tail pages from being rearranged in
			 * any way.
So this page must be available at this 4949 * point, unless the page refcount overflowed: 4950 */ 4951 if (WARN_ON_ONCE(!try_grab_compound_head(pages[i], 4952 refs, 4953 flags))) { 4954 spin_unlock(ptl); 4955 remainder = 0; 4956 err = -ENOMEM; 4957 break; 4958 } 4959 } 4960 4961 vaddr += (refs << PAGE_SHIFT); 4962 remainder -= refs; 4963 i += refs; 4964 4965 spin_unlock(ptl); 4966 } 4967 *nr_pages = remainder; 4968 /* 4969 * setting position is actually required only if remainder is 4970 * not zero but it's faster not to add a "if (remainder)" 4971 * branch. 4972 */ 4973 *position = vaddr; 4974 4975 return i ? i : err; 4976 } 4977 4978 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE 4979 /* 4980 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can 4981 * implement this. 4982 */ 4983 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) 4984 #endif 4985 4986 unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 4987 unsigned long address, unsigned long end, pgprot_t newprot) 4988 { 4989 struct mm_struct *mm = vma->vm_mm; 4990 unsigned long start = address; 4991 pte_t *ptep; 4992 pte_t pte; 4993 struct hstate *h = hstate_vma(vma); 4994 unsigned long pages = 0; 4995 bool shared_pmd = false; 4996 struct mmu_notifier_range range; 4997 4998 /* 4999 * In the case of shared PMDs, the area to flush could be beyond 5000 * start/end. Set range.start/range.end to cover the maximum possible 5001 * range if PMD sharing is possible. 5002 */ 5003 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 5004 0, vma, mm, start, end); 5005 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5006 5007 BUG_ON(address >= end); 5008 flush_cache_range(vma, range.start, range.end); 5009 5010 mmu_notifier_invalidate_range_start(&range); 5011 i_mmap_lock_write(vma->vm_file->f_mapping); 5012 for (; address < end; address += huge_page_size(h)) { 5013 spinlock_t *ptl; 5014 ptep = huge_pte_offset(mm, address, huge_page_size(h)); 5015 if (!ptep) 5016 continue; 5017 ptl = huge_pte_lock(h, mm, ptep); 5018 if (huge_pmd_unshare(mm, vma, &address, ptep)) { 5019 pages++; 5020 spin_unlock(ptl); 5021 shared_pmd = true; 5022 continue; 5023 } 5024 pte = huge_ptep_get(ptep); 5025 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 5026 spin_unlock(ptl); 5027 continue; 5028 } 5029 if (unlikely(is_hugetlb_entry_migration(pte))) { 5030 swp_entry_t entry = pte_to_swp_entry(pte); 5031 5032 if (is_write_migration_entry(entry)) { 5033 pte_t newpte; 5034 5035 make_migration_entry_read(&entry); 5036 newpte = swp_entry_to_pte(entry); 5037 set_huge_swap_pte_at(mm, address, ptep, 5038 newpte, huge_page_size(h)); 5039 pages++; 5040 } 5041 spin_unlock(ptl); 5042 continue; 5043 } 5044 if (!huge_pte_none(pte)) { 5045 pte_t old_pte; 5046 5047 old_pte = huge_ptep_modify_prot_start(vma, address, ptep); 5048 pte = pte_mkhuge(huge_pte_modify(old_pte, newprot)); 5049 pte = arch_make_huge_pte(pte, vma, NULL, 0); 5050 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); 5051 pages++; 5052 } 5053 spin_unlock(ptl); 5054 } 5055 /* 5056 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare 5057 * may have cleared our pud entry and done put_page on the page table: 5058 * once we release i_mmap_rwsem, another task can do the final put_page 5059 * and that page table be reused and filled with junk. If we actually 5060 * did unshare a page of pmds, flush the range corresponding to the pud. 
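	 * (range.start/range.end were widened to PUD_SIZE granularity by
	 * adjust_range_if_pmd_sharing_possible() above, so this flush covers
	 * any page table page that huge_pmd_unshare() may have dropped, not
	 * just [start, end).)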
	 */
	if (shared_pmd)
		flush_hugetlb_tlb_range(vma, range.start, range.end);
	else
		flush_hugetlb_tlb_range(vma, start, end);
	/*
	 * No need to call mmu_notifier_invalidate_range() as we are
	 * downgrading page table protection, not changing it to point
	 * to a new page.
	 *
	 * See Documentation/vm/mmu_notifier.rst
	 */
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	mmu_notifier_invalidate_range_end(&range);

	return pages << h->order;
}

/* Return true if reservation was successful, false otherwise. */
bool hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
	long chg, add = -1;
	struct hstate *h = hstate_inode(inode);
	struct hugepage_subpool *spool = subpool_inode(inode);
	struct resv_map *resv_map;
	struct hugetlb_cgroup *h_cg = NULL;
	long gbl_reserve, regions_needed = 0;

	/* This should never happen */
	if (from > to) {
		VM_WARN(1, "%s called with a negative range\n", __func__);
		return false;
	}

	/*
	 * Only apply hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves.
	 */
	if (vm_flags & VM_NORESERVE)
		return true;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		/*
		 * resv_map can not be NULL as hugetlb_reserve_pages is only
		 * called for inodes for which resv_maps were created (see
		 * hugetlbfs_get_inode).
		 */
		resv_map = inode_resv_map(inode);

		chg = region_chg(resv_map, from, to, &regions_needed);

	} else {
		/* Private mapping. */
		resv_map = resv_map_alloc();
		if (!resv_map)
			return false;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0)
		goto out_err;

	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
				chg * pages_per_huge_page(h), &h_cg) < 0)
		goto out_err;

	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
		/* For private mappings, the hugetlb_cgroup uncharge info hangs
		 * off the resv_map.
		 */
		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
	}

	/*
	 * There must be enough pages in the subpool for the mapping. If
	 * the subpool has a minimum size, there may be some global
	 * reservations already in place (gbl_reserve).
	 */
	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
	if (gbl_reserve < 0)
		goto out_uncharge_cgroup;

	/*
	 * Check that enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not.
	 */
	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
		goto out_put_pages;

	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed.
Private mappings are per-VMA and 5169 * only the consumed reservations are tracked. When the VMA 5170 * disappears, the original reservation is the VMA size and the 5171 * consumed reservations are stored in the map. Hence, nothing 5172 * else has to be done for private mappings here 5173 */ 5174 if (!vma || vma->vm_flags & VM_MAYSHARE) { 5175 add = region_add(resv_map, from, to, regions_needed, h, h_cg); 5176 5177 if (unlikely(add < 0)) { 5178 hugetlb_acct_memory(h, -gbl_reserve); 5179 goto out_put_pages; 5180 } else if (unlikely(chg > add)) { 5181 /* 5182 * pages in this range were added to the reserve 5183 * map between region_chg and region_add. This 5184 * indicates a race with alloc_huge_page. Adjust 5185 * the subpool and reserve counts modified above 5186 * based on the difference. 5187 */ 5188 long rsv_adjust; 5189 5190 hugetlb_cgroup_uncharge_cgroup_rsvd( 5191 hstate_index(h), 5192 (chg - add) * pages_per_huge_page(h), h_cg); 5193 5194 rsv_adjust = hugepage_subpool_put_pages(spool, 5195 chg - add); 5196 hugetlb_acct_memory(h, -rsv_adjust); 5197 } 5198 } 5199 return true; 5200 5201 out_put_pages: 5202 /* put back original number of pages, chg */ 5203 (void)hugepage_subpool_put_pages(spool, chg); 5204 out_uncharge_cgroup: 5205 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), 5206 chg * pages_per_huge_page(h), h_cg); 5207 out_err: 5208 if (!vma || vma->vm_flags & VM_MAYSHARE) 5209 /* Only call region_abort if the region_chg succeeded but the 5210 * region_add failed or didn't run. 5211 */ 5212 if (chg >= 0 && add < 0) 5213 region_abort(resv_map, from, to, regions_needed); 5214 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 5215 kref_put(&resv_map->refs, resv_map_release); 5216 return false; 5217 } 5218 5219 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 5220 long freed) 5221 { 5222 struct hstate *h = hstate_inode(inode); 5223 struct resv_map *resv_map = inode_resv_map(inode); 5224 long chg = 0; 5225 struct hugepage_subpool *spool = subpool_inode(inode); 5226 long gbl_reserve; 5227 5228 /* 5229 * Since this routine can be called in the evict inode path for all 5230 * hugetlbfs inodes, resv_map could be NULL. 5231 */ 5232 if (resv_map) { 5233 chg = region_del(resv_map, start, end); 5234 /* 5235 * region_del() can fail in the rare case where a region 5236 * must be split and another region descriptor can not be 5237 * allocated. If end == LONG_MAX, it will not fail. 5238 */ 5239 if (chg < 0) 5240 return chg; 5241 } 5242 5243 spin_lock(&inode->i_lock); 5244 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 5245 spin_unlock(&inode->i_lock); 5246 5247 /* 5248 * If the subpool has a minimum size, the number of global 5249 * reservations to be released may be adjusted. 
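	 *
	 * Illustrative example (hypothetical numbers): if chg - freed is 3
	 * pages but the subpool must retain 2 pages to stay at its minimum
	 * size, hugepage_subpool_put_pages() returns 1 and only that single
	 * reservation is dropped from the global pool below.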
	 */
	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -gbl_reserve);

	return 0;
}

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;

	/*
	 * Match the virtual addresses, permissions and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    !range_in_vma(svma, sbase, s_end))
		return 0;

	return saddr;
}

static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * Check on proper vm_flags and page table alignment.
	 */
	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
		return true;
	return false;
}

/*
 * Determine if the start,end range within vma could be mapped by a shared pmd.
 * If yes, adjust start and end to cover the range associated with possible
 * shared pmd mappings.
 */
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);

	/*
	 * The vma must span at least one aligned PUD size, and the start,end
	 * range must at least partially fall within it.
	 */
	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
		(*end <= v_start) || (*start >= v_end))
		return;

	/* Extend the range to be PUD aligned for a worst case scenario */
	if (*start > v_start)
		*start = ALIGN_DOWN(*start, PUD_SIZE);

	if (*end < v_end)
		*end = ALIGN(*end, PUD_SIZE);
}

/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner.
 *
 * This routine must be called with i_mmap_rwsem held in at least read mode if
 * sharing is possible. For hugetlbfs, this prevents removal of any page
 * table entries associated with the address space. This is important as we
 * are setting up sharing based on existing page table entries (mappings).
 *
 * NOTE: This routine is only called from huge_pte_alloc. Some callers of
 * huge_pte_alloc know that sharing is not possible and do not take
 * i_mmap_rwsem as a performance optimization. This is handled by the
 * if !vma_shareable check at the beginning of the routine. i_mmap_rwsem is
 * only required for subsequent processing.
5339 */ 5340 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 5341 { 5342 struct vm_area_struct *vma = find_vma(mm, addr); 5343 struct address_space *mapping = vma->vm_file->f_mapping; 5344 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + 5345 vma->vm_pgoff; 5346 struct vm_area_struct *svma; 5347 unsigned long saddr; 5348 pte_t *spte = NULL; 5349 pte_t *pte; 5350 spinlock_t *ptl; 5351 5352 if (!vma_shareable(vma, addr)) 5353 return (pte_t *)pmd_alloc(mm, pud, addr); 5354 5355 i_mmap_assert_locked(mapping); 5356 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 5357 if (svma == vma) 5358 continue; 5359 5360 saddr = page_table_shareable(svma, vma, addr, idx); 5361 if (saddr) { 5362 spte = huge_pte_offset(svma->vm_mm, saddr, 5363 vma_mmu_pagesize(svma)); 5364 if (spte) { 5365 get_page(virt_to_page(spte)); 5366 break; 5367 } 5368 } 5369 } 5370 5371 if (!spte) 5372 goto out; 5373 5374 ptl = huge_pte_lock(hstate_vma(vma), mm, spte); 5375 if (pud_none(*pud)) { 5376 pud_populate(mm, pud, 5377 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 5378 mm_inc_nr_pmds(mm); 5379 } else { 5380 put_page(virt_to_page(spte)); 5381 } 5382 spin_unlock(ptl); 5383 out: 5384 pte = (pte_t *)pmd_alloc(mm, pud, addr); 5385 return pte; 5386 } 5387 5388 /* 5389 * unmap huge page backed by shared pte. 5390 * 5391 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared 5392 * indicated by page_count > 1, unmap is achieved by clearing pud and 5393 * decrementing the ref count. If count == 1, the pte page is not shared. 5394 * 5395 * Called with page table lock held and i_mmap_rwsem held in write mode. 5396 * 5397 * returns: 1 successfully unmapped a shared pte page 5398 * 0 the underlying pte page is not shared, or it is the last user 5399 */ 5400 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 5401 unsigned long *addr, pte_t *ptep) 5402 { 5403 pgd_t *pgd = pgd_offset(mm, *addr); 5404 p4d_t *p4d = p4d_offset(pgd, *addr); 5405 pud_t *pud = pud_offset(p4d, *addr); 5406 5407 i_mmap_assert_write_locked(vma->vm_file->f_mapping); 5408 BUG_ON(page_count(virt_to_page(ptep)) == 0); 5409 if (page_count(virt_to_page(ptep)) == 1) 5410 return 0; 5411 5412 pud_clear(pud); 5413 put_page(virt_to_page(ptep)); 5414 mm_dec_nr_pmds(mm); 5415 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE; 5416 return 1; 5417 } 5418 #define want_pmd_share() (1) 5419 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 5420 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 5421 { 5422 return NULL; 5423 } 5424 5425 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 5426 unsigned long *addr, pte_t *ptep) 5427 { 5428 return 0; 5429 } 5430 5431 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 5432 unsigned long *start, unsigned long *end) 5433 { 5434 } 5435 #define want_pmd_share() (0) 5436 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 5437 5438 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 5439 pte_t *huge_pte_alloc(struct mm_struct *mm, 5440 unsigned long addr, unsigned long sz) 5441 { 5442 pgd_t *pgd; 5443 p4d_t *p4d; 5444 pud_t *pud; 5445 pte_t *pte = NULL; 5446 5447 pgd = pgd_offset(mm, addr); 5448 p4d = p4d_alloc(mm, pgd, addr); 5449 if (!p4d) 5450 return NULL; 5451 pud = pud_alloc(mm, p4d, addr); 5452 if (pud) { 5453 if (sz == PUD_SIZE) { 5454 pte = (pte_t *)pud; 5455 } else { 5456 BUG_ON(sz != PMD_SIZE); 5457 if (want_pmd_share() && pud_none(*pud)) 5458 pte = huge_pmd_share(mm, addr, pud); 5459 else 5460 pte = 
(pte_t *)pmd_alloc(mm, pud, addr); 5461 } 5462 } 5463 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte)); 5464 5465 return pte; 5466 } 5467 5468 /* 5469 * huge_pte_offset() - Walk the page table to resolve the hugepage 5470 * entry at address @addr 5471 * 5472 * Return: Pointer to page table entry (PUD or PMD) for 5473 * address @addr, or NULL if a !p*d_present() entry is encountered and the 5474 * size @sz doesn't match the hugepage size at this level of the page 5475 * table. 5476 */ 5477 pte_t *huge_pte_offset(struct mm_struct *mm, 5478 unsigned long addr, unsigned long sz) 5479 { 5480 pgd_t *pgd; 5481 p4d_t *p4d; 5482 pud_t *pud; 5483 pmd_t *pmd; 5484 5485 pgd = pgd_offset(mm, addr); 5486 if (!pgd_present(*pgd)) 5487 return NULL; 5488 p4d = p4d_offset(pgd, addr); 5489 if (!p4d_present(*p4d)) 5490 return NULL; 5491 5492 pud = pud_offset(p4d, addr); 5493 if (sz == PUD_SIZE) 5494 /* must be pud huge, non-present or none */ 5495 return (pte_t *)pud; 5496 if (!pud_present(*pud)) 5497 return NULL; 5498 /* must have a valid entry and size to go further */ 5499 5500 pmd = pmd_offset(pud, addr); 5501 /* must be pmd huge, non-present or none */ 5502 return (pte_t *)pmd; 5503 } 5504 5505 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 5506 5507 /* 5508 * These functions are overwritable if your architecture needs its own 5509 * behavior. 5510 */ 5511 struct page * __weak 5512 follow_huge_addr(struct mm_struct *mm, unsigned long address, 5513 int write) 5514 { 5515 return ERR_PTR(-EINVAL); 5516 } 5517 5518 struct page * __weak 5519 follow_huge_pd(struct vm_area_struct *vma, 5520 unsigned long address, hugepd_t hpd, int flags, int pdshift) 5521 { 5522 WARN(1, "hugepd follow called with no support for hugepage directory format\n"); 5523 return NULL; 5524 } 5525 5526 struct page * __weak 5527 follow_huge_pmd(struct mm_struct *mm, unsigned long address, 5528 pmd_t *pmd, int flags) 5529 { 5530 struct page *page = NULL; 5531 spinlock_t *ptl; 5532 pte_t pte; 5533 5534 /* FOLL_GET and FOLL_PIN are mutually exclusive. */ 5535 if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == 5536 (FOLL_PIN | FOLL_GET))) 5537 return NULL; 5538 5539 retry: 5540 ptl = pmd_lockptr(mm, pmd); 5541 spin_lock(ptl); 5542 /* 5543 * make sure that the address range covered by this pmd is not 5544 * unmapped from other threads. 5545 */ 5546 if (!pmd_huge(*pmd)) 5547 goto out; 5548 pte = huge_ptep_get((pte_t *)pmd); 5549 if (pte_present(pte)) { 5550 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); 5551 /* 5552 * try_grab_page() should always succeed here, because: a) we 5553 * hold the pmd (ptl) lock, and b) we've just checked that the 5554 * huge pmd (head) page is present in the page tables. The ptl 5555 * prevents the head page and tail pages from being rearranged 5556 * in any way. So this page must be available at this point, 5557 * unless the page refcount overflowed: 5558 */ 5559 if (WARN_ON_ONCE(!try_grab_page(page, flags))) { 5560 page = NULL; 5561 goto out; 5562 } 5563 } else { 5564 if (is_hugetlb_entry_migration(pte)) { 5565 spin_unlock(ptl); 5566 __migration_entry_wait(mm, (pte_t *)pmd, ptl); 5567 goto retry; 5568 } 5569 /* 5570 * hwpoisoned entry is treated as no_page_table in 5571 * follow_page_mask(). 
5572 */ 5573 } 5574 out: 5575 spin_unlock(ptl); 5576 return page; 5577 } 5578 5579 struct page * __weak 5580 follow_huge_pud(struct mm_struct *mm, unsigned long address, 5581 pud_t *pud, int flags) 5582 { 5583 if (flags & (FOLL_GET | FOLL_PIN)) 5584 return NULL; 5585 5586 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT); 5587 } 5588 5589 struct page * __weak 5590 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags) 5591 { 5592 if (flags & (FOLL_GET | FOLL_PIN)) 5593 return NULL; 5594 5595 return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT); 5596 } 5597 5598 bool isolate_huge_page(struct page *page, struct list_head *list) 5599 { 5600 bool ret = true; 5601 5602 spin_lock(&hugetlb_lock); 5603 if (!PageHeadHuge(page) || 5604 !HPageMigratable(page) || 5605 !get_page_unless_zero(page)) { 5606 ret = false; 5607 goto unlock; 5608 } 5609 ClearHPageMigratable(page); 5610 list_move_tail(&page->lru, list); 5611 unlock: 5612 spin_unlock(&hugetlb_lock); 5613 return ret; 5614 } 5615 5616 void putback_active_hugepage(struct page *page) 5617 { 5618 spin_lock(&hugetlb_lock); 5619 SetHPageMigratable(page); 5620 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); 5621 spin_unlock(&hugetlb_lock); 5622 put_page(page); 5623 } 5624 5625 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason) 5626 { 5627 struct hstate *h = page_hstate(oldpage); 5628 5629 hugetlb_cgroup_migrate(oldpage, newpage); 5630 set_page_owner_migrate_reason(newpage, reason); 5631 5632 /* 5633 * transfer temporary state of the new huge page. This is 5634 * reverse to other transitions because the newpage is going to 5635 * be final while the old one will be freed so it takes over 5636 * the temporary status. 5637 * 5638 * Also note that we have to transfer the per-node surplus state 5639 * here as well otherwise the global surplus count will not match 5640 * the per-node's. 5641 */ 5642 if (HPageTemporary(newpage)) { 5643 int old_nid = page_to_nid(oldpage); 5644 int new_nid = page_to_nid(newpage); 5645 5646 SetHPageTemporary(oldpage); 5647 ClearHPageTemporary(newpage); 5648 5649 spin_lock(&hugetlb_lock); 5650 if (h->surplus_huge_pages_node[old_nid]) { 5651 h->surplus_huge_pages_node[old_nid]--; 5652 h->surplus_huge_pages_node[new_nid]++; 5653 } 5654 spin_unlock(&hugetlb_lock); 5655 } 5656 } 5657 5658 #ifdef CONFIG_CMA 5659 static bool cma_reserve_called __initdata; 5660 5661 static int __init cmdline_parse_hugetlb_cma(char *p) 5662 { 5663 hugetlb_cma_size = memparse(p, &p); 5664 return 0; 5665 } 5666 5667 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma); 5668 5669 void __init hugetlb_cma_reserve(int order) 5670 { 5671 unsigned long size, reserved, per_node; 5672 int nid; 5673 5674 cma_reserve_called = true; 5675 5676 if (!hugetlb_cma_size) 5677 return; 5678 5679 if (hugetlb_cma_size < (PAGE_SIZE << order)) { 5680 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n", 5681 (PAGE_SIZE << order) / SZ_1M); 5682 return; 5683 } 5684 5685 /* 5686 * If 3 GB area is requested on a machine with 4 numa nodes, 5687 * let's allocate 1 GB on first three nodes and ignore the last one. 
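	 *
	 * Worked example (illustrative): per_node = DIV_ROUND_UP(3 GB, 4)
	 * = 768 MiB, which the round_up() below raises to the 1 GB gigantic
	 * page granularity; after three nodes, reserved (3 GB) reaches
	 * hugetlb_cma_size and the loop stops before the fourth node.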
5688 */ 5689 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes); 5690 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", 5691 hugetlb_cma_size / SZ_1M, per_node / SZ_1M); 5692 5693 reserved = 0; 5694 for_each_node_state(nid, N_ONLINE) { 5695 int res; 5696 char name[CMA_MAX_NAME]; 5697 5698 size = min(per_node, hugetlb_cma_size - reserved); 5699 size = round_up(size, PAGE_SIZE << order); 5700 5701 snprintf(name, sizeof(name), "hugetlb%d", nid); 5702 res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order, 5703 0, false, name, 5704 &hugetlb_cma[nid], nid); 5705 if (res) { 5706 pr_warn("hugetlb_cma: reservation failed: err %d, node %d", 5707 res, nid); 5708 continue; 5709 } 5710 5711 reserved += size; 5712 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n", 5713 size / SZ_1M, nid); 5714 5715 if (reserved >= hugetlb_cma_size) 5716 break; 5717 } 5718 } 5719 5720 void __init hugetlb_cma_check(void) 5721 { 5722 if (!hugetlb_cma_size || cma_reserve_called) 5723 return; 5724 5725 pr_warn("hugetlb_cma: the option isn't supported by current arch\n"); 5726 } 5727 5728 #endif /* CONFIG_CMA */ 5729
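
/*
 * Usage note (illustrative): the CMA area above is sized via the
 * "hugetlb_cma=" boot parameter, which is parsed with memparse() and so
 * accepts size suffixes; it is typically combined with the gigantic page
 * options, e.g.:
 *
 *	hugetlb_cma=4G hugepagesz=1G hugepages=4
 */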