Lines Matching +full:link +full:- +full:trigger +full:- +full:order +full:- +full:start
1 // SPDX-License-Identifier: GPL-2.0-only
90 unsigned long start, unsigned long end);
106 if (spool->count) in subpool_is_free()
108 if (spool->max_hpages != -1) in subpool_is_free()
109 return spool->used_hpages == 0; in subpool_is_free()
110 if (spool->min_hpages != -1) in subpool_is_free()
111 return spool->rsv_hpages == spool->min_hpages; in subpool_is_free()
119 spin_unlock_irqrestore(&spool->lock, irq_flags); in unlock_or_release_subpool()
125 if (spool->min_hpages != -1) in unlock_or_release_subpool()
126 hugetlb_acct_memory(spool->hstate, in unlock_or_release_subpool()
127 -spool->min_hpages); in unlock_or_release_subpool()
141 spin_lock_init(&spool->lock); in hugepage_new_subpool()
142 spool->count = 1; in hugepage_new_subpool()
143 spool->max_hpages = max_hpages; in hugepage_new_subpool()
144 spool->hstate = h; in hugepage_new_subpool()
145 spool->min_hpages = min_hpages; in hugepage_new_subpool()
147 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { in hugepage_new_subpool()
151 spool->rsv_hpages = min_hpages; in hugepage_new_subpool()
160 spin_lock_irqsave(&spool->lock, flags); in hugepage_put_subpool()
161 BUG_ON(!spool->count); in hugepage_put_subpool()
162 spool->count--; in hugepage_put_subpool()
168 * Return -ENOMEM if there are not enough resources to satisfy the
182 spin_lock_irq(&spool->lock); in hugepage_subpool_get_pages()
184 if (spool->max_hpages != -1) { /* maximum size accounting */ in hugepage_subpool_get_pages()
185 if ((spool->used_hpages + delta) <= spool->max_hpages) in hugepage_subpool_get_pages()
186 spool->used_hpages += delta; in hugepage_subpool_get_pages()
188 ret = -ENOMEM; in hugepage_subpool_get_pages()
194 if (spool->min_hpages != -1 && spool->rsv_hpages) { in hugepage_subpool_get_pages()
195 if (delta > spool->rsv_hpages) { in hugepage_subpool_get_pages()
200 ret = delta - spool->rsv_hpages; in hugepage_subpool_get_pages()
201 spool->rsv_hpages = 0; in hugepage_subpool_get_pages()
204 spool->rsv_hpages -= delta; in hugepage_subpool_get_pages()
209 spin_unlock_irq(&spool->lock); in hugepage_subpool_get_pages()
228 spin_lock_irqsave(&spool->lock, flags); in hugepage_subpool_put_pages()
230 if (spool->max_hpages != -1) /* maximum size accounting */ in hugepage_subpool_put_pages()
231 spool->used_hpages -= delta; in hugepage_subpool_put_pages()
234 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) { in hugepage_subpool_put_pages()
235 if (spool->rsv_hpages + delta <= spool->min_hpages) in hugepage_subpool_put_pages()
238 ret = spool->rsv_hpages + delta - spool->min_hpages; in hugepage_subpool_put_pages()
240 spool->rsv_hpages += delta; in hugepage_subpool_put_pages()
241 if (spool->rsv_hpages > spool->min_hpages) in hugepage_subpool_put_pages()
242 spool->rsv_hpages = spool->min_hpages; in hugepage_subpool_put_pages()
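The get/put fragments above implement the subpool's two-sided accounting: max_hpages caps usage, while min_hpages pre-reserves pages so only the excess has to come from the global pool. Below is a minimal userspace sketch of that arithmetic, assuming the same -1 == "no limit" convention; the struct, the plain -1 error return, and the numbers are illustrative, not the kernel code.

#include <stdio.h>

struct subpool {
    long max_hpages;    /* -1 == no maximum */
    long min_hpages;    /* -1 == no minimum reservation */
    long used_hpages;
    long rsv_hpages;    /* pages still covered by the minimum reservation */
};

/* Returns how many pages the global pool must additionally provide, or -1 on failure. */
static long subpool_get(struct subpool *sp, long delta)
{
    long ret = delta;

    if (sp->max_hpages != -1) {
        if (sp->used_hpages + delta > sp->max_hpages)
            return -1;                      /* over the subpool maximum */
        sp->used_hpages += delta;
    }
    if (sp->min_hpages != -1 && sp->rsv_hpages) {
        if (delta > sp->rsv_hpages) {
            ret = delta - sp->rsv_hpages;   /* only the excess hits the global pool */
            sp->rsv_hpages = 0;
        } else {
            ret = 0;                        /* fully covered by the reservation */
            sp->rsv_hpages -= delta;
        }
    }
    return ret;
}

int main(void)
{
    struct subpool sp = { .max_hpages = 8, .min_hpages = 4,
                          .used_hpages = 0, .rsv_hpages = 4 };

    printf("global pages needed: %ld\n", subpool_get(&sp, 6));  /* prints 2 */
    return 0;
}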
256 return HUGETLBFS_SB(inode->i_sb)->spool; in subpool_inode()
261 return subpool_inode(file_inode(vma->vm_file)); in subpool_vma()
270 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vma_lock_read()
272 down_read(&vma_lock->rw_sema); in hugetlb_vma_lock_read()
276 down_read(&resv_map->rw_sema); in hugetlb_vma_lock_read()
283 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vma_unlock_read()
285 up_read(&vma_lock->rw_sema); in hugetlb_vma_unlock_read()
289 up_read(&resv_map->rw_sema); in hugetlb_vma_unlock_read()
296 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vma_lock_write()
298 down_write(&vma_lock->rw_sema); in hugetlb_vma_lock_write()
302 down_write(&resv_map->rw_sema); in hugetlb_vma_lock_write()
309 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vma_unlock_write()
311 up_write(&vma_lock->rw_sema); in hugetlb_vma_unlock_write()
315 up_write(&resv_map->rw_sema); in hugetlb_vma_unlock_write()
323 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vma_trylock_write()
325 return down_write_trylock(&vma_lock->rw_sema); in hugetlb_vma_trylock_write()
329 return down_write_trylock(&resv_map->rw_sema); in hugetlb_vma_trylock_write()
338 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vma_assert_locked()
340 lockdep_assert_held(&vma_lock->rw_sema); in hugetlb_vma_assert_locked()
344 lockdep_assert_held(&resv_map->rw_sema); in hugetlb_vma_assert_locked()
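For orientation, here is a userspace stand-in for the read/write locking pattern above, using a POSIX rwlock in place of the kernel rw_semaphore; the type and helper names are hypothetical.

#include <pthread.h>
#include <stdio.h>

struct vma_lock_model {
    pthread_rwlock_t rw_sema;
};

static void lock_read(struct vma_lock_model *l)    { pthread_rwlock_rdlock(&l->rw_sema); }
static void unlock_read(struct vma_lock_model *l)  { pthread_rwlock_unlock(&l->rw_sema); }
static void lock_write(struct vma_lock_model *l)   { pthread_rwlock_wrlock(&l->rw_sema); }
static void unlock_write(struct vma_lock_model *l) { pthread_rwlock_unlock(&l->rw_sema); }

int main(void)
{
    struct vma_lock_model l;

    pthread_rwlock_init(&l.rw_sema, NULL);
    lock_read(&l);      /* many readers may hold this concurrently */
    unlock_read(&l);
    lock_write(&l);     /* writers are exclusive */
    unlock_write(&l);
    pthread_rwlock_destroy(&l.rw_sema);
    puts("ok");
    return 0;
}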
358 struct vm_area_struct *vma = vma_lock->vma; in __hugetlb_vma_unlock_write_put()
363 * Semaphore synchronizes access to vma_lock->vma field. in __hugetlb_vma_unlock_write_put()
365 vma_lock->vma = NULL; in __hugetlb_vma_unlock_write_put()
366 vma->vm_private_data = NULL; in __hugetlb_vma_unlock_write_put()
367 up_write(&vma_lock->rw_sema); in __hugetlb_vma_unlock_write_put()
368 kref_put(&vma_lock->refs, hugetlb_vma_lock_release); in __hugetlb_vma_unlock_write_put()
374 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in __hugetlb_vma_unlock_write_free()
381 up_write(&resv_map->rw_sema); in __hugetlb_vma_unlock_write_free()
393 if (vma->vm_private_data) { in hugetlb_vma_lock_free()
394 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vma_lock_free()
396 down_write(&vma_lock->rw_sema); in hugetlb_vma_lock_free()
406 if (!vma || !(vma->vm_flags & VM_MAYSHARE)) in hugetlb_vma_lock_alloc()
409 /* Should never get here with non-NULL vm_private_data */ in hugetlb_vma_lock_alloc()
410 if (vma->vm_private_data) in hugetlb_vma_lock_alloc()
429 kref_init(&vma_lock->refs); in hugetlb_vma_lock_alloc()
430 init_rwsem(&vma_lock->rw_sema); in hugetlb_vma_lock_alloc()
431 vma_lock->vma = vma; in hugetlb_vma_lock_alloc()
432 vma->vm_private_data = vma_lock; in hugetlb_vma_lock_alloc()
443 VM_BUG_ON(resv->region_cache_count <= 0); in get_file_region_entry_from_cache()
445 resv->region_cache_count--; in get_file_region_entry_from_cache()
446 nrg = list_first_entry(&resv->region_cache, struct file_region, link); in get_file_region_entry_from_cache()
447 list_del(&nrg->link); in get_file_region_entry_from_cache()
449 nrg->from = from; in get_file_region_entry_from_cache()
450 nrg->to = to; in get_file_region_entry_from_cache()
459 nrg->reservation_counter = rg->reservation_counter; in copy_hugetlb_cgroup_uncharge_info()
460 nrg->css = rg->css; in copy_hugetlb_cgroup_uncharge_info()
461 if (rg->css) in copy_hugetlb_cgroup_uncharge_info()
462 css_get(rg->css); in copy_hugetlb_cgroup_uncharge_info()
474 nrg->reservation_counter = in record_hugetlb_cgroup_uncharge_info()
475 &h_cg->rsvd_hugepage[hstate_index(h)]; in record_hugetlb_cgroup_uncharge_info()
476 nrg->css = &h_cg->css; in record_hugetlb_cgroup_uncharge_info()
478 * The caller will hold exactly one h_cg->css reference for the in record_hugetlb_cgroup_uncharge_info()
482 * reference. In order to ensure that one file_region must hold in record_hugetlb_cgroup_uncharge_info()
483 * exactly one h_cg->css reference, we should do css_get for in record_hugetlb_cgroup_uncharge_info()
487 css_get(&h_cg->css); in record_hugetlb_cgroup_uncharge_info()
488 if (!resv->pages_per_hpage) in record_hugetlb_cgroup_uncharge_info()
489 resv->pages_per_hpage = pages_per_huge_page(h); in record_hugetlb_cgroup_uncharge_info()
493 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); in record_hugetlb_cgroup_uncharge_info()
495 nrg->reservation_counter = NULL; in record_hugetlb_cgroup_uncharge_info()
496 nrg->css = NULL; in record_hugetlb_cgroup_uncharge_info()
504 if (rg->css) in put_uncharge_info()
505 css_put(rg->css); in put_uncharge_info()
513 return rg->reservation_counter == org->reservation_counter && in has_same_uncharge_info()
514 rg->css == org->css; in has_same_uncharge_info()
525 prg = list_prev_entry(rg, link); in coalesce_file_region()
526 if (&prg->link != &resv->regions && prg->to == rg->from && in coalesce_file_region()
528 prg->to = rg->to; in coalesce_file_region()
530 list_del(&rg->link); in coalesce_file_region()
537 nrg = list_next_entry(rg, link); in coalesce_file_region()
538 if (&nrg->link != &resv->regions && nrg->from == rg->to && in coalesce_file_region()
540 nrg->from = rg->from; in coalesce_file_region()
542 list_del(&rg->link); in coalesce_file_region()
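coalesce_file_region() merges a region with its neighbours when the end of one interval meets the start of the next (the kernel additionally requires matching cgroup uncharge info, omitted here). A simplified sketch over a sorted array instead of a list_head list:

#include <stdio.h>

struct region { long from, to; };

/* Merge regions[i] with its neighbours when the endpoints touch; returns new count. */
static int coalesce(struct region *r, int n, int i)
{
    if (i > 0 && r[i - 1].to == r[i].from) {        /* merge with previous */
        r[i - 1].to = r[i].to;
        for (int k = i; k < n - 1; k++)
            r[k] = r[k + 1];
        n--;
        i--;
    }
    if (i < n - 1 && r[i].to == r[i + 1].from) {    /* merge with next */
        r[i].to = r[i + 1].to;
        for (int k = i + 1; k < n - 1; k++)
            r[k] = r[k + 1];
        n--;
    }
    return n;
}

int main(void)
{
    struct region r[] = { { 0, 2 }, { 2, 5 }, { 5, 9 } };
    int n = coalesce(r, 3, 1);

    for (int i = 0; i < n; i++)
        printf("[%ld, %ld)\n", r[i].from, r[i].to); /* prints [0, 9) */
    return 0;
}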
558 list_add(&nrg->link, rg); in hugetlb_resv_map_add()
563 return to - from; in hugetlb_resv_map_add()
567 * Must be called with resv->lock held.
579 struct list_head *head = &resv->regions; in add_reservation_in_range()
588 * [last_accounted_offset, iter->from), at every iteration, with some in add_reservation_in_range()
591 list_for_each_entry_safe(iter, trg, head, link) { in add_reservation_in_range()
592 /* Skip irrelevant regions that start before our range. */ in add_reservation_in_range()
593 if (iter->from < f) { in add_reservation_in_range()
597 if (iter->to > last_accounted_offset) in add_reservation_in_range()
598 last_accounted_offset = iter->to; in add_reservation_in_range()
605 if (iter->from >= t) { in add_reservation_in_range()
606 rg = iter->link.prev; in add_reservation_in_range()
610 /* Add an entry for last_accounted_offset -> iter->from, and in add_reservation_in_range()
613 if (iter->from > last_accounted_offset) in add_reservation_in_range()
614 add += hugetlb_resv_map_add(resv, iter->link.prev, in add_reservation_in_range()
616 iter->from, h, h_cg, in add_reservation_in_range()
619 last_accounted_offset = iter->to; in add_reservation_in_range()
626 rg = head->prev; in add_reservation_in_range()
634 /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
638 __must_hold(&resv->lock) in allocate_file_region_entries()
655 while (resv->region_cache_count < in allocate_file_region_entries()
656 (resv->adds_in_progress + regions_needed)) { in allocate_file_region_entries()
657 to_allocate = resv->adds_in_progress + regions_needed - in allocate_file_region_entries()
658 resv->region_cache_count; in allocate_file_region_entries()
664 VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress); in allocate_file_region_entries()
666 spin_unlock(&resv->lock); in allocate_file_region_entries()
671 list_add(&trg->link, &allocated_regions); in allocate_file_region_entries()
674 spin_lock(&resv->lock); in allocate_file_region_entries()
676 list_splice(&allocated_regions, &resv->region_cache); in allocate_file_region_entries()
677 resv->region_cache_count += to_allocate; in allocate_file_region_entries()
683 list_for_each_entry_safe(rg, trg, &allocated_regions, link) { in allocate_file_region_entries()
684 list_del(&rg->link); in allocate_file_region_entries()
687 return -ENOMEM; in allocate_file_region_entries()
702 * this operation and we were not able to allocate, it returns -ENOMEM.
713 spin_lock(&resv->lock); in region_add()
730 resv->region_cache_count < in region_add()
731 resv->adds_in_progress + in region_add()
732 (actual_regions_needed - in_regions_needed)) { in region_add()
736 VM_BUG_ON(t - f <= 1); in region_add()
739 resv, actual_regions_needed - in_regions_needed)) { in region_add()
740 return -ENOMEM; in region_add()
748 resv->adds_in_progress -= in_regions_needed; in region_add()
750 spin_unlock(&resv->lock); in region_add()
766 * resv->adds_in_progress. This value needs to be provided to a follow up call
771 * zero. -ENOMEM is returned if a new file_region structure or cache entry
779 spin_lock(&resv->lock); in region_chg()
789 return -ENOMEM; in region_chg()
791 resv->adds_in_progress += *out_regions_needed; in region_chg()
793 spin_unlock(&resv->lock); in region_chg()
813 spin_lock(&resv->lock); in region_abort()
814 VM_BUG_ON(!resv->region_cache_count); in region_abort()
815 resv->adds_in_progress -= regions_needed; in region_abort()
816 spin_unlock(&resv->lock); in region_abort()
828 * be allocated. If the allocation fails, -ENOMEM will be returned.
830 * a region and possibly return -ENOMEM. Callers specifying
831 * t == LONG_MAX do not need to check for -ENOMEM error.
835 struct list_head *head = &resv->regions; in region_del()
841 spin_lock(&resv->lock); in region_del()
842 list_for_each_entry_safe(rg, trg, head, link) { in region_del()
850 if (rg->to <= f && (rg->to != rg->from || rg->to != f)) in region_del()
853 if (rg->from >= t) in region_del()
856 if (f > rg->from && t < rg->to) { /* Must split region */ in region_del()
862 resv->region_cache_count > resv->adds_in_progress) { in region_del()
863 nrg = list_first_entry(&resv->region_cache, in region_del()
865 link); in region_del()
866 list_del(&nrg->link); in region_del()
867 resv->region_cache_count--; in region_del()
871 spin_unlock(&resv->lock); in region_del()
874 return -ENOMEM; in region_del()
878 del += t - f; in region_del()
880 resv, rg, t - f, false); in region_del()
883 nrg->from = t; in region_del()
884 nrg->to = rg->to; in region_del()
888 INIT_LIST_HEAD(&nrg->link); in region_del()
891 rg->to = f; in region_del()
893 list_add(&nrg->link, &rg->link); in region_del()
898 if (f <= rg->from && t >= rg->to) { /* Remove entire region */ in region_del()
899 del += rg->to - rg->from; in region_del()
901 rg->to - rg->from, true); in region_del()
902 list_del(&rg->link); in region_del()
907 if (f <= rg->from) { /* Trim beginning of region */ in region_del()
909 t - rg->from, false); in region_del()
911 del += t - rg->from; in region_del()
912 rg->from = t; in region_del()
915 rg->to - f, false); in region_del()
917 del += rg->to - f; in region_del()
918 rg->to = f; in region_del()
922 spin_unlock(&resv->lock); in region_del()
962 struct list_head *head = &resv->regions; in region_count()
966 spin_lock(&resv->lock); in region_count()
968 list_for_each_entry(rg, head, link) { in region_count()
972 if (rg->to <= f) in region_count()
974 if (rg->from >= t) in region_count()
977 seg_from = max(rg->from, f); in region_count()
978 seg_to = min(rg->to, t); in region_count()
980 chg += seg_to - seg_from; in region_count()
982 spin_unlock(&resv->lock); in region_count()
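region_count() sums the overlap of each existing region with [f, t). The same clamping arithmetic in a standalone toy, with made-up intervals:

#include <stdio.h>

struct region { long from, to; };

static long max_l(long a, long b) { return a > b ? a : b; }
static long min_l(long a, long b) { return a < b ? a : b; }

int main(void)
{
    struct region regions[] = { { 0, 4 }, { 6, 10 }, { 12, 20 } };
    long f = 3, t = 15, chg = 0;

    for (unsigned i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
        if (regions[i].to <= f || regions[i].from >= t)
            continue;   /* no overlap with [f, t) */
        chg += min_l(regions[i].to, t) - max_l(regions[i].from, f);
    }
    printf("overlapping pages: %ld\n", chg);    /* 1 + 4 + 3 = 8 */
    return 0;
}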
994 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
995 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
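A worked example of the vma_hugecache_offset() computation above, assuming 2 MiB huge pages (shift 21, order 9) on 4 KiB base pages; the addresses and offsets are invented for illustration:

#include <stdio.h>

int main(void)
{
    unsigned long vm_start = 0x7f0000000000UL;  /* hypothetical mapping start */
    unsigned long vm_pgoff = 512;               /* file offset in 4 KiB pages (= one 2 MiB page) */
    unsigned long address  = vm_start + 3 * (1UL << 21);
    unsigned int huge_page_shift = 21, huge_page_order = 9;

    unsigned long idx = ((address - vm_start) >> huge_page_shift) +
                        (vm_pgoff >> huge_page_order);

    printf("hugepage cache index = %lu\n", idx);    /* 3 + 1 = 4 */
    return 0;
}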
999 * vma_kernel_pagesize - Page size granularity for this VMA.
1009 if (vma->vm_ops && vma->vm_ops->pagesize) in vma_kernel_pagesize()
1010 return vma->vm_ops->pagesize(vma); in vma_kernel_pagesize()
1018 * architectures where it differs, an architecture-specific 'strong'
1056 return (unsigned long)vma->vm_private_data; in get_vma_private_data()
1062 vma->vm_private_data = (void *)value; in set_vma_private_data()
1072 resv_map->reservation_counter = NULL; in resv_map_set_hugetlb_cgroup_uncharge_info()
1073 resv_map->pages_per_hpage = 0; in resv_map_set_hugetlb_cgroup_uncharge_info()
1074 resv_map->css = NULL; in resv_map_set_hugetlb_cgroup_uncharge_info()
1076 resv_map->reservation_counter = in resv_map_set_hugetlb_cgroup_uncharge_info()
1077 &h_cg->rsvd_hugepage[hstate_index(h)]; in resv_map_set_hugetlb_cgroup_uncharge_info()
1078 resv_map->pages_per_hpage = pages_per_huge_page(h); in resv_map_set_hugetlb_cgroup_uncharge_info()
1079 resv_map->css = &h_cg->css; in resv_map_set_hugetlb_cgroup_uncharge_info()
1095 kref_init(&resv_map->refs); in resv_map_alloc()
1096 spin_lock_init(&resv_map->lock); in resv_map_alloc()
1097 INIT_LIST_HEAD(&resv_map->regions); in resv_map_alloc()
1098 init_rwsem(&resv_map->rw_sema); in resv_map_alloc()
1100 resv_map->adds_in_progress = 0; in resv_map_alloc()
1104 * re-initialized to the proper values, to indicate that hugetlb cgroup in resv_map_alloc()
1105 * reservations are to be un-charged from here. in resv_map_alloc()
1109 INIT_LIST_HEAD(&resv_map->region_cache); in resv_map_alloc()
1110 list_add(&rg->link, &resv_map->region_cache); in resv_map_alloc()
1111 resv_map->region_cache_count = 1; in resv_map_alloc()
1119 struct list_head *head = &resv_map->region_cache; in resv_map_release()
1126 list_for_each_entry_safe(rg, trg, head, link) { in resv_map_release()
1127 list_del(&rg->link); in resv_map_release()
1131 VM_BUG_ON(resv_map->adds_in_progress); in resv_map_release()
1143 * The VERY common case is inode->mapping == &inode->i_data but, in inode_resv_map()
1146 return (struct resv_map *)(&inode->i_data)->i_private_data; in inode_resv_map()
1152 if (vma->vm_flags & VM_MAYSHARE) { in vma_resv_map()
1153 struct address_space *mapping = vma->vm_file->f_mapping; in vma_resv_map()
1154 struct inode *inode = mapping->host; in vma_resv_map()
1167 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_map()
1175 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_flags()
1189 return !(vma->vm_flags & VM_MAYSHARE) && in __vma_private_lock()
1199 * - For shared mappings this is a per-vma semaphore that may be in hugetlb_dup_vma_private()
1205 * - For MAP_PRIVATE mappings, this is the reserve map which does in hugetlb_dup_vma_private()
1207 * not guaranteed to succeed, even if read-only. in hugetlb_dup_vma_private()
1209 if (vma->vm_flags & VM_MAYSHARE) { in hugetlb_dup_vma_private()
1210 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_dup_vma_private()
1212 if (vma_lock && vma_lock->vma != vma) in hugetlb_dup_vma_private()
1213 vma->vm_private_data = NULL; in hugetlb_dup_vma_private()
1215 vma->vm_private_data = NULL; in hugetlb_dup_vma_private()
1220 * Called with mm->mmap_lock writer semaphore held.
1243 kref_put(&reservations->refs, resv_map_release); in clear_vma_resv_huge_pages()
1252 if (vma->vm_flags & VM_NORESERVE) { in vma_has_reserves()
1260 * properly, so add work-around here. in vma_has_reserves()
1262 if (vma->vm_flags & VM_MAYSHARE && chg == 0) in vma_has_reserves()
1269 if (vma->vm_flags & VM_MAYSHARE) { in vma_has_reserves()
1293 * Very Subtle - The value of chg comes from a previous in vma_has_reserves()
1319 list_move(&folio->lru, &h->hugepage_freelists[nid]); in enqueue_hugetlb_folio()
1320 h->free_huge_pages++; in enqueue_hugetlb_folio()
1321 h->free_huge_pages_node[nid]++; in enqueue_hugetlb_folio()
1329 bool pin = !!(current->flags & PF_MEMALLOC_PIN); in dequeue_hugetlb_folio_node_exact()
1332 list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) { in dequeue_hugetlb_folio_node_exact()
1339 list_move(&folio->lru, &h->hugepage_activelist); in dequeue_hugetlb_folio_node_exact()
1342 h->free_huge_pages--; in dequeue_hugetlb_folio_node_exact()
1343 h->free_huge_pages_node[nid]--; in dequeue_hugetlb_folio_node_exact()
1392 return h->free_huge_pages - h->resv_huge_pages; in available_huge_pages()
1435 h->resv_huge_pages--; in dequeue_hugetlb_folio_vma()
1448 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
1487 * helper for remove_pool_hugetlb_folio() - return the previously saved
1498 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); in hstate_next_node_to_free()
1499 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_free()
1508 nr_nodes--)
1514 nr_nodes--)
1522 int order = huge_page_order(h); in alloc_gigantic_folio() local
1534 folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask); in alloc_gigantic_folio()
1541 folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask); in alloc_gigantic_folio()
1549 folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask); in alloc_gigantic_folio()
1602 list_del(&folio->lru); in remove_hugetlb_folio()
1606 h->free_huge_pages--; in remove_hugetlb_folio()
1607 h->free_huge_pages_node[nid]--; in remove_hugetlb_folio()
1610 h->surplus_huge_pages--; in remove_hugetlb_folio()
1611 h->surplus_huge_pages_node[nid]--; in remove_hugetlb_folio()
1622 h->nr_huge_pages--; in remove_hugetlb_folio()
1623 h->nr_huge_pages_node[nid]--; in remove_hugetlb_folio()
1635 INIT_LIST_HEAD(&folio->lru); in add_hugetlb_folio()
1636 h->nr_huge_pages++; in add_hugetlb_folio()
1637 h->nr_huge_pages_node[nid]++; in add_hugetlb_folio()
1640 h->surplus_huge_pages++; in add_hugetlb_folio()
1641 h->surplus_huge_pages_node[nid]++; in add_hugetlb_folio()
1707 INIT_LIST_HEAD(&folio->_deferred_list); in __update_and_free_hugetlb_folio()
1718 * freed and frees them one-by-one. As the page->mapping pointer is going
1736 node = node->next; in free_hpage_workfn()
1737 folio->mapping = NULL; in free_hpage_workfn()
1740 * folio_hstate() is going to trigger because a previous call to in free_hpage_workfn()
1774 if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist)) in update_and_free_hugetlb_folio()
1793 list_del(&folio->lru); in bulk_vmemmap_restore_error()
1813 list_del(&folio->lru); in bulk_vmemmap_restore_error()
1818 list_del(&folio->lru); in bulk_vmemmap_restore_error()
1898 __ClearPageAnonExclusive(&folio->page); in free_huge_folio()
1899 folio->mapping = NULL; in free_huge_folio()
1928 lruvec_stat_mod_folio(folio, NR_HUGETLB, -pages_per_huge_page(h)); in free_huge_folio()
1931 h->resv_huge_pages++; in free_huge_folio()
1937 } else if (h->surplus_huge_pages_node[nid]) { in free_huge_folio()
1955 h->nr_huge_pages++; in __prep_account_new_huge_page()
1956 h->nr_huge_pages_node[nid]++; in __prep_account_new_huge_page()
1962 INIT_LIST_HEAD(&folio->lru); in init_new_hugetlb_folio()
1986 * stable. Due to locking order, we can only trylock_write. If we can
2006 int order = huge_page_order(h); in alloc_buddy_hugetlb_folio() local
2025 folio = __folio_alloc(gfp_mask, order, nid, nmask); in alloc_buddy_hugetlb_folio()
2165 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && in remove_pool_hugetlb_folio()
2166 !list_empty(&h->hugepage_freelists[node])) { in remove_pool_hugetlb_folio()
2167 folio = list_entry(h->hugepage_freelists[node].next, in remove_pool_hugetlb_folio()
2179 * does nothing for in-use hugetlb folios and non-hugetlb folios.
2182 * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
2186 * -EBUSY: failed to dissolved free hugepages or the hugepage is in-use
2193 int rc = -EBUSY; in dissolve_free_hugetlb_folio()
2220 * Theoretically, we should return -EBUSY when we in dissolve_free_hugetlb_folio()
2231 h->max_huge_pages--; in dissolve_free_hugetlb_folio()
2244 * non-vmemmap optimized hugetlb folios. in dissolve_free_hugetlb_folio()
2251 h->max_huge_pages++; in dissolve_free_hugetlb_folio()
2278 unsigned int order; in dissolve_free_hugetlb_folios() local
2284 order = huge_page_order(&default_hstate); in dissolve_free_hugetlb_folios()
2286 order = min(order, huge_page_order(h)); in dissolve_free_hugetlb_folios()
2288 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) { in dissolve_free_hugetlb_folios()
2310 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) in alloc_surplus_hugetlb_folio()
2326 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { in alloc_surplus_hugetlb_folio()
2333 h->surplus_huge_pages++; in alloc_surplus_hugetlb_folio()
2334 h->surplus_huge_pages_node[folio_nid(folio)]++; in alloc_surplus_hugetlb_folio()
2403 VM_BUG_ON(!h->resv_huge_pages); in alloc_hugetlb_folio_reserve()
2404 h->resv_huge_pages--; in alloc_hugetlb_folio_reserve()
2428 /* We cannot fallback to other nodes, as we could break the per-node pool. */ in alloc_hugetlb_folio_nodemask()
2444 if (mpol->mode == MPOL_BIND && in policy_mbind_nodemask()
2446 cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) in policy_mbind_nodemask()
2447 return &mpol->nodes; in policy_mbind_nodemask()
2469 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; in gather_surplus_pages()
2471 h->resv_huge_pages += delta; in gather_surplus_pages()
2477 ret = -ENOMEM; in gather_surplus_pages()
2494 list_add(&folio->lru, &surplus_list); in gather_surplus_pages()
2504 needed = (h->resv_huge_pages + delta) - in gather_surplus_pages()
2505 (h->free_huge_pages + allocated); in gather_surplus_pages()
2525 h->resv_huge_pages += delta; in gather_surplus_pages()
2530 if ((--needed) < 0) in gather_surplus_pages()
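gather_surplus_pages() recomputes "needed" after each allocation round because the free pool can change while the lock is dropped. A toy model of that retry loop, with a hypothetical alloc_one() that always succeeds:

#include <stdio.h>

static int alloc_one(void) { return 1; }    /* pretend every allocation succeeds */

int main(void)
{
    long free_huge_pages = 2, resv_huge_pages = 1, delta = 4;
    long allocated = 0;
    long needed = (resv_huge_pages + delta) - free_huge_pages;

    while (needed > 0) {
        for (long i = 0; i < needed; i++)
            allocated += alloc_one();
        /* recheck: the free count may have changed while allocating */
        needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
    }
    printf("allocated %ld surplus pages\n", allocated); /* 3 */
    return 0;
}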
2565 h->resv_huge_pages -= unused_resv_pages; in return_unused_surplus_pages()
2572 * by pre-allocated pages. Only free surplus pages. in return_unused_surplus_pages()
2574 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); in return_unused_surplus_pages()
2582 * on-line nodes with memory and will handle the hstate accounting. in return_unused_surplus_pages()
2584 while (nr_pages--) { in return_unused_surplus_pages()
2591 list_add(&folio->lru, &page_list); in return_unused_surplus_pages()
2670 if (vma->vm_flags & VM_MAYSHARE) { in __vma_reservation_common()
2680 if (vma->vm_flags & VM_MAYSHARE) { in __vma_reservation_common()
2693 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV) in __vma_reservation_common()
2702 * Subtle - The reserve map for private mappings has the in __vma_reservation_common()
2822 if (!(vma->vm_flags & VM_MAYSHARE)) in restore_reserve_on_error()
2841 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
2868 * Fail with -EBUSY if not possible. in alloc_and_dissolve_hugetlb_folio()
2872 ret = isolated ? 0 : -EBUSY; in alloc_and_dissolve_hugetlb_folio()
2890 return -ENOMEM; in alloc_and_dissolve_hugetlb_folio()
2932 int ret = -EBUSY; in isolate_or_dissolve_huge_page()
2950 * alloc_contig_range and them. Return -ENOMEM as this has the effect in isolate_or_dissolve_huge_page()
2954 return -ENOMEM; in isolate_or_dissolve_huge_page()
2980 if (memcg_charge_ret == -ENOMEM) { in alloc_hugetlb_folio()
2982 return ERR_PTR(-ENOMEM); in alloc_hugetlb_folio()
2996 return ERR_PTR(-ENOMEM); in alloc_hugetlb_folio()
3052 h->resv_huge_pages--; in alloc_hugetlb_folio()
3054 list_add(&folio->lru, &h->hugepage_activelist); in alloc_hugetlb_folio()
3086 hugetlb_acct_memory(h, -rsv_adjust); in alloc_hugetlb_folio()
3116 return ERR_PTR(-ENOSPC); in alloc_hugetlb_folio()
3135 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_MEMORY]) { in __alloc_bootmem_huge_page()
3159 huge_page_size(h) - PAGE_SIZE); in __alloc_bootmem_huge_page()
3161 INIT_LIST_HEAD(&m->list); in __alloc_bootmem_huge_page()
3162 list_add(&m->list, &huge_boot_pages[node]); in __alloc_bootmem_huge_page()
3163 m->hstate = h; in __alloc_bootmem_huge_page()
3181 __ClearPageReserved(folio_page(folio, pfn - head_pfn)); in hugetlb_folio_init_tail_vmemmap()
3183 prep_compound_tail((struct page *)folio, pfn - head_pfn); in hugetlb_folio_init_tail_vmemmap()
3236 * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
3248 h = m->hstate; in gather_bootmem_prealloc_node()
3263 list_add(&folio->lru, &folio_list); in gather_bootmem_prealloc_node()
3267 * in order to fix confusing memory reports from free(1) and in gather_bootmem_prealloc_node()
3268 * other side-effects, like CommitLimit going negative. in gather_bootmem_prealloc_node()
3277 static void __init gather_bootmem_prealloc_parallel(unsigned long start, in gather_bootmem_prealloc_parallel() argument
3282 for (nid = start; nid < end; nid++) in gather_bootmem_prealloc_parallel()
3291 .start = 0, in gather_bootmem_prealloc()
3308 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { in hugetlb_hstate_alloc_pages_onenode()
3320 list_add(&folio->lru, &folio_list); in hugetlb_hstate_alloc_pages_onenode()
3328 if (i == h->max_huge_pages_node[nid]) in hugetlb_hstate_alloc_pages_onenode()
3333 h->max_huge_pages_node[nid], buf, nid, i); in hugetlb_hstate_alloc_pages_onenode()
3334 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); in hugetlb_hstate_alloc_pages_onenode()
3335 h->max_huge_pages_node[nid] = i; in hugetlb_hstate_alloc_pages_onenode()
3344 if (h->max_huge_pages_node[i] > 0) { in hugetlb_hstate_alloc_pages_specific_nodes()
3355 if (allocated < h->max_huge_pages) { in hugetlb_hstate_alloc_pages_errcheck()
3360 h->max_huge_pages, buf, allocated); in hugetlb_hstate_alloc_pages_errcheck()
3361 h->max_huge_pages = allocated; in hugetlb_hstate_alloc_pages_errcheck()
3365 static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg) in hugetlb_pages_alloc_boot_node() argument
3368 int i, num = end - start; in hugetlb_pages_alloc_boot_node()
3373 /* Bit mask controlling how hard we retry per-node allocations.*/ in hugetlb_pages_alloc_boot_node()
3382 list_move(&folio->lru, &folio_list); in hugetlb_pages_alloc_boot_node()
3393 for (i = 0; i < h->max_huge_pages; ++i) { in hugetlb_gigantic_pages_alloc_boot()
3411 job.start = 0; in hugetlb_pages_alloc_boot()
3412 job.size = h->max_huge_pages; in hugetlb_pages_alloc_boot()
3425 * +------------+-------+-------+-------+-------+-------+ in hugetlb_pages_alloc_boot()
3427 * +------------+-------+-------+-------+-------+-------+ in hugetlb_pages_alloc_boot()
3431 * +------------+-------+-------+-------+-------+-------+ in hugetlb_pages_alloc_boot()
3434 job.min_chunk = h->max_huge_pages / num_node_state(N_MEMORY) / 2; in hugetlb_pages_alloc_boot()
3437 return h->nr_huge_pages; in hugetlb_pages_alloc_boot()
3442 * non-gigantic pages.
3443 * - For gigantic pages, this is called early in the boot process and
3447 * - For non-gigantic pages, this is called later in the boot process after
3494 * Set demote order for each hstate. Note that in hugetlb_init_hstates()
3495 * h->demote_order is initially 0. in hugetlb_init_hstates()
3496 * - We can not demote gigantic pages if runtime freeing in hugetlb_init_hstates()
3498 * - If CMA allocation is possible, we can not demote in hugetlb_init_hstates()
3503 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) in hugetlb_init_hstates()
3508 if (h2->order < h->order && in hugetlb_init_hstates()
3509 h2->order > h->demote_order) in hugetlb_init_hstates()
3510 h->demote_order = h2->order; in hugetlb_init_hstates()
3523 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", in report_hugepages()
3524 buf, h->free_huge_pages); in report_hugepages()
3546 struct list_head *freel = &h->hugepage_freelists[i]; in try_to_free_low()
3548 if (count >= h->nr_huge_pages) in try_to_free_low()
3553 list_add(&folio->lru, &page_list); in try_to_free_low()
3570 * Increment or decrement surplus_huge_pages. Keep node-specific counters
3571 * balanced by operating on them in a round-robin fashion.
3580 VM_BUG_ON(delta != -1 && delta != 1); in adjust_pool_surplus()
3583 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
3584 if (h->surplus_huge_pages_node[node]) in adjust_pool_surplus()
3589 if (h->surplus_huge_pages_node[node] < in adjust_pool_surplus()
3590 h->nr_huge_pages_node[node]) in adjust_pool_surplus()
3597 h->surplus_huge_pages += delta; in adjust_pool_surplus()
3598 h->surplus_huge_pages_node[node] += delta; in adjust_pool_surplus()
3602 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
3613 * Bit mask controlling how hard we retry per-node allocations. in set_max_huge_pages()
3620 return -ENOMEM; in set_max_huge_pages()
3626 mutex_lock(&h->resize_lock); in set_max_huge_pages()
3639 count += persistent_huge_pages(h) - in set_max_huge_pages()
3640 (h->nr_huge_pages_node[nid] - in set_max_huge_pages()
3641 h->surplus_huge_pages_node[nid]); in set_max_huge_pages()
3662 mutex_unlock(&h->resize_lock); in set_max_huge_pages()
3664 return -EINVAL; in set_max_huge_pages()
3680 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { in set_max_huge_pages()
3681 if (!adjust_pool_surplus(h, nodes_allowed, -1)) in set_max_huge_pages()
3699 &h->next_nid_to_alloc); in set_max_huge_pages()
3706 list_add(&folio->lru, &page_list); in set_max_huge_pages()
3709 /* Bail for signals. Probably ctrl-c from user */ in set_max_huge_pages()
3741 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; in set_max_huge_pages()
3753 list_add(&folio->lru, &page_list); in set_max_huge_pages()
3766 h->max_huge_pages = persistent_huge_pages(h); in set_max_huge_pages()
3768 mutex_unlock(&h->resize_lock); in set_max_huge_pages()
3791 * Note that we already hold src->resize_lock. To prevent deadlock, in demote_free_hugetlb_folios()
3794 mutex_lock(&dst->resize_lock); in demote_free_hugetlb_folios()
3802 list_del(&folio->lru); in demote_free_hugetlb_folios()
3804 split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst)); in demote_free_hugetlb_folios()
3810 page->mapping = NULL; in demote_free_hugetlb_folios()
3812 prep_compound_page(page, dst->order); in demote_free_hugetlb_folios()
3815 list_add(&page->lru, &dst_list); in demote_free_hugetlb_folios()
3821 mutex_unlock(&dst->resize_lock); in demote_free_hugetlb_folios()
3837 /* We should never get here if no demote order */ in demote_pool_huge_page()
3838 if (!src->demote_order) { in demote_pool_huge_page()
3839 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); in demote_pool_huge_page()
3840 return -EINVAL; /* internal error */ in demote_pool_huge_page()
3842 dst = size_to_hstate(PAGE_SIZE << src->demote_order); in demote_pool_huge_page()
3848 list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) { in demote_pool_huge_page()
3853 list_add(&folio->lru, &list); in demote_pool_huge_page()
3866 list_del(&folio->lru); in demote_pool_huge_page()
3869 nr_demoted--; in demote_pool_huge_page()
3880 src->max_huge_pages -= nr_demoted; in demote_pool_huge_page()
3881 dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst)); in demote_pool_huge_page()
3890 * Return -EBUSY so that caller will not retry. in demote_pool_huge_page()
3892 return -EBUSY; in demote_pool_huge_page()
3932 nr_huge_pages = h->nr_huge_pages; in nr_hugepages_show_common()
3934 nr_huge_pages = h->nr_huge_pages_node[nid]; in nr_hugepages_show_common()
3947 return -EINVAL; in __nr_hugepages_store_common()
4005 * hstate attribute for optionally mempolicy-based constraint on persistent
4028 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); in nr_overcommit_hugepages_show()
4039 return -EINVAL; in nr_overcommit_hugepages_store()
4046 h->nr_overcommit_huge_pages = input; in nr_overcommit_hugepages_store()
4062 free_huge_pages = h->free_huge_pages; in free_hugepages_show()
4064 free_huge_pages = h->free_huge_pages_node[nid]; in free_hugepages_show()
4074 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); in resv_hugepages_show()
4087 surplus_huge_pages = h->surplus_huge_pages; in surplus_hugepages_show()
4089 surplus_huge_pages = h->surplus_huge_pages_node[nid]; in surplus_hugepages_show()
4118 mutex_lock(&h->resize_lock); in demote_store()
4129 nr_available = h->free_huge_pages_node[nid]; in demote_store()
4131 nr_available = h->free_huge_pages; in demote_store()
4132 nr_available -= h->resv_huge_pages; in demote_store()
4142 nr_demote -= rc; in demote_store()
4146 mutex_unlock(&h->resize_lock); in demote_store()
4158 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; in demote_size_show()
4175 return -EINVAL; in demote_size_store()
4176 demote_order = demote_hstate->order; in demote_size_store()
4178 return -EINVAL; in demote_size_store()
4180 /* demote order must be smaller than hstate order */ in demote_size_store()
4182 if (demote_order >= h->order) in demote_size_store()
4183 return -EINVAL; in demote_size_store()
4186 mutex_lock(&h->resize_lock); in demote_size_store()
4187 h->demote_order = demote_order; in demote_size_store()
4188 mutex_unlock(&h->resize_lock); in demote_size_store()
4227 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); in hugetlb_sysfs_add_hstate()
4229 return -ENOMEM; in hugetlb_sysfs_add_hstate()
4238 if (h->demote_order) { in hugetlb_sysfs_add_hstate()
4242 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); in hugetlb_sysfs_add_hstate()
4257 * node_hstate/s - associate per node hstate attributes, via their kobjects,
4284 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
4285 * Returns node id via non-NULL nidp.
4295 if (nhs->hstate_kobjs[i] == kobj) { in kobj_to_node_hstate()
4308 * No-op if no hstate attributes attached.
4313 struct node_hstate *nhs = &node_hstates[node->dev.id]; in hugetlb_unregister_node()
4315 if (!nhs->hugepages_kobj) in hugetlb_unregister_node()
4320 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx]; in hugetlb_unregister_node()
4324 if (h->demote_order) in hugetlb_unregister_node()
4328 nhs->hstate_kobjs[idx] = NULL; in hugetlb_unregister_node()
4331 kobject_put(nhs->hugepages_kobj); in hugetlb_unregister_node()
4332 nhs->hugepages_kobj = NULL; in hugetlb_unregister_node()
4338 * No-op if attributes already registered.
4343 struct node_hstate *nhs = &node_hstates[node->dev.id]; in hugetlb_register_node()
4349 if (nhs->hugepages_kobj) in hugetlb_register_node()
4352 nhs->hugepages_kobj = kobject_create_and_add("hugepages", in hugetlb_register_node()
4353 &node->dev.kobj); in hugetlb_register_node()
4354 if (!nhs->hugepages_kobj) in hugetlb_register_node()
4358 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, in hugetlb_register_node()
4359 nhs->hstate_kobjs, in hugetlb_register_node()
4363 h->name, node->dev.id); in hugetlb_register_node()
4372 * devices of nodes that have memory. All on-line nodes should have
4388 *nidp = -1; in kobj_to_node_hstate()
4417 pr_err("HugeTLB: Unable to add hstate %s", h->name); in hugetlb_sysfs_init()
4441 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); in hugetlb_init()
4511 void __init hugetlb_add_hstate(unsigned int order) in hugetlb_add_hstate() argument
4516 if (size_to_hstate(PAGE_SIZE << order)) { in hugetlb_add_hstate()
4520 BUG_ON(order < order_base_2(__NR_USED_SUBPAGE)); in hugetlb_add_hstate()
4522 __mutex_init(&h->resize_lock, "resize mutex", &h->resize_key); in hugetlb_add_hstate()
4523 h->order = order; in hugetlb_add_hstate()
4524 h->mask = ~(huge_page_size(h) - 1); in hugetlb_add_hstate()
4526 INIT_LIST_HEAD(&h->hugepage_freelists[i]); in hugetlb_add_hstate()
4527 INIT_LIST_HEAD(&h->hugepage_activelist); in hugetlb_add_hstate()
4528 h->next_nid_to_alloc = first_memory_node; in hugetlb_add_hstate()
4529 h->next_nid_to_free = first_memory_node; in hugetlb_add_hstate()
4530 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", in hugetlb_add_hstate()
4548 parsed_hstate->max_huge_pages = 0; in hugepages_clear_pages_in_node()
4549 memset(parsed_hstate->max_huge_pages_node, 0, in hugepages_clear_pages_in_node()
4550 sizeof(parsed_hstate->max_huge_pages_node)); in hugepages_clear_pages_in_node()
4585 mhp = &parsed_hstate->max_huge_pages; in hugepages_setup()
4612 parsed_hstate->max_huge_pages_node[node] = tmp; in hugepages_setup()
4691 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); in hugepagesz_setup()
4719 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); in default_hugepagesz_setup()
4750 unsigned int *array = h->free_huge_pages_node; in allowed_mems_nr()
4770 * In order to avoid races with __do_proc_doulongvec_minmax(), we in proc_hugetlb_doulongvec_minmax()
4784 unsigned long tmp = h->max_huge_pages; in hugetlb_sysctl_handler_common()
4788 return -EOPNOTSUPP; in hugetlb_sysctl_handler_common()
4827 return -EOPNOTSUPP; in hugetlb_overcommit_handler()
4829 tmp = h->nr_overcommit_huge_pages; in hugetlb_overcommit_handler()
4832 return -EINVAL; in hugetlb_overcommit_handler()
4841 h->nr_overcommit_huge_pages = tmp; in hugetlb_overcommit_handler()
4896 unsigned long count = h->nr_huge_pages; in hugetlb_report_meminfo()
4908 h->free_huge_pages, in hugetlb_report_meminfo()
4909 h->resv_huge_pages, in hugetlb_report_meminfo()
4910 h->surplus_huge_pages, in hugetlb_report_meminfo()
4928 nid, h->nr_huge_pages_node[nid], in hugetlb_report_node_meminfo()
4929 nid, h->free_huge_pages_node[nid], in hugetlb_report_node_meminfo()
4930 nid, h->surplus_huge_pages_node[nid]); in hugetlb_report_node_meminfo()
4943 h->nr_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4944 h->free_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4945 h->surplus_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4952 K(atomic_long_read(&mm->hugetlb_usage))); in hugetlb_report_usage()
4962 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); in hugetlb_total_pages()
4968 int ret = -ENOMEM; in hugetlb_acct_memory()
4986 * undesirable. However, in order to preserve some of the semantics, in hugetlb_acct_memory()
5009 return_unused_surplus_pages(h, (unsigned long) -delta); in hugetlb_acct_memory()
5031 kref_get(&resv->refs); in hugetlb_vm_op_open()
5040 if (vma->vm_flags & VM_MAYSHARE) { in hugetlb_vm_op_open()
5041 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_vm_op_open()
5044 if (vma_lock->vma != vma) { in hugetlb_vm_op_open()
5045 vma->vm_private_data = NULL; in hugetlb_vm_op_open()
5059 unsigned long reserve, start, end; in hugetlb_vm_op_close() local
5068 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
5069 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
5071 reserve = (end - start) - region_count(resv, start, end); in hugetlb_vm_op_close()
5072 hugetlb_cgroup_uncharge_counter(resv, start, end); in hugetlb_vm_op_close()
5079 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_vm_op_close()
5082 kref_put(&resv->refs, resv_map_release); in hugetlb_vm_op_close()
5088 return -EINVAL; in hugetlb_vm_op_split()
5091 * PMD sharing is only possible for PUD_SIZE-aligned address ranges in hugetlb_vm_op_split()
5104 if (floor >= vma->vm_start && ceil <= vma->vm_end) in hugetlb_vm_op_split()
5118 * handle_mm_fault() to try to instantiate regular-sized pages in the
5151 vma->vm_page_prot))); in make_huge_pte()
5154 vma->vm_page_prot)); in make_huge_pte()
5157 entry = arch_make_huge_pte(entry, shift, vma->vm_flags); in make_huge_pte()
5167 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep))); in set_huge_ptep_writable()
5202 pte_t newpte = make_huge_pte(vma, &new_folio->page, 1); in hugetlb_install_folio()
5208 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz); in hugetlb_install_folio()
5209 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); in hugetlb_install_folio()
5220 bool cow = is_cow_mapping(src_vma->vm_flags); in copy_hugetlb_page_range()
5230 src_vma->vm_start, in copy_hugetlb_page_range()
5231 src_vma->vm_end); in copy_hugetlb_page_range()
5234 raw_write_seqcount_begin(&src->write_protect_seq); in copy_hugetlb_page_range()
5246 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { in copy_hugetlb_page_range()
5255 ret = -ENOMEM; in copy_hugetlb_page_range()
5275 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); in copy_hugetlb_page_range()
5313 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); in copy_hugetlb_page_range()
5323 * When pre-allocating the page or copying data, we in copy_hugetlb_page_range()
5354 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); in copy_hugetlb_page_range()
5392 raw_write_seqcount_end(&src->write_protect_seq); in copy_hugetlb_page_range()
5406 struct mm_struct *mm = vma->vm_mm; in move_huge_pte()
5434 struct address_space *mapping = vma->vm_file->f_mapping; in move_hugetlb_page_tables()
5436 struct mm_struct *mm = vma->vm_mm; in move_hugetlb_page_tables()
5445 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); in move_hugetlb_page_tables()
5450 flush_cache_range(vma, range.start, range.end); in move_hugetlb_page_tables()
5482 flush_hugetlb_tlb_range(vma, range.start, range.end); in move_hugetlb_page_tables()
5484 flush_hugetlb_tlb_range(vma, old_end - len, old_end); in move_hugetlb_page_tables()
5489 return len + old_addr - old_end; in move_hugetlb_page_tables()
5493 unsigned long start, unsigned long end, in __unmap_hugepage_range() argument
5496 struct mm_struct *mm = vma->vm_mm; in __unmap_hugepage_range()
5509 BUG_ON(start & ~huge_page_mask(h)); in __unmap_hugepage_range()
5520 address = start; in __unmap_hugepage_range()
5549 * If the pte was wr-protected by uffd-wp in any of the in __unmap_hugepage_range()
5551 * drop the uffd-wp bit in this zap, then replace the in __unmap_hugepage_range()
5588 /* Leave a uffd-wp pte marker if needed */ in __unmap_hugepage_range()
5603 if (!h->surplus_huge_pages && __vma_private_lock(vma) && in __unmap_hugepage_range()
5615 * resv->adds_in_progress if it succeeds. If this is not done, in __unmap_hugepage_range()
5661 unsigned long *start, unsigned long *end) in __hugetlb_zap_begin() argument
5663 if (!vma->vm_file) /* hugetlbfs_file_mmap error */ in __hugetlb_zap_begin()
5666 adjust_range_if_pmd_sharing_possible(vma, start, end); in __hugetlb_zap_begin()
5668 if (vma->vm_file) in __hugetlb_zap_begin()
5669 i_mmap_lock_write(vma->vm_file->f_mapping); in __hugetlb_zap_begin()
5675 zap_flags_t zap_flags = details ? details->zap_flags : 0; in __hugetlb_zap_end()
5677 if (!vma->vm_file) /* hugetlbfs_file_mmap error */ in __hugetlb_zap_end()
5695 if (vma->vm_file) in __hugetlb_zap_end()
5696 i_mmap_unlock_write(vma->vm_file->f_mapping); in __hugetlb_zap_end()
5699 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, in unmap_hugepage_range() argument
5706 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in unmap_hugepage_range()
5707 start, end); in unmap_hugepage_range()
5708 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); in unmap_hugepage_range()
5710 tlb_gather_mmu(&tlb, vma->vm_mm); in unmap_hugepage_range()
5712 __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags); in unmap_hugepage_range()
5737 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + in unmap_ref_private()
5738 vma->vm_pgoff; in unmap_ref_private()
5739 mapping = vma->vm_file->f_mapping; in unmap_ref_private()
5747 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { in unmap_ref_private()
5757 if (iter_vma->vm_flags & VM_MAYSHARE) in unmap_ref_private()
5763 * areas. This is because a future no-page fault on this VMA in unmap_ref_private()
5783 struct vm_area_struct *vma = vmf->vma; in hugetlb_wp()
5784 struct mm_struct *mm = vma->vm_mm; in hugetlb_wp()
5785 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in hugetlb_wp()
5786 pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte); in hugetlb_wp()
5795 * Never handle CoW for uffd-wp protected pages. It should be only in hugetlb_wp()
5796 * handled when the uffd-wp protection is removed. in hugetlb_wp()
5799 * can trigger this, because hugetlb_fault() will always resolve in hugetlb_wp()
5800 * uffd-wp bit first. in hugetlb_wp()
5806 * hugetlb does not support FOLL_FORCE-style write faults that keep the in hugetlb_wp()
5809 if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE))) in hugetlb_wp()
5813 if (vma->vm_flags & VM_MAYSHARE) { in hugetlb_wp()
5814 set_huge_ptep_writable(vma, vmf->address, vmf->pte); in hugetlb_wp()
5824 * If no-one else is actually using this page, we're the exclusive in hugetlb_wp()
5835 if (!PageAnonExclusive(&old_folio->page)) { in hugetlb_wp()
5837 SetPageAnonExclusive(&old_folio->page); in hugetlb_wp()
5840 set_huge_ptep_writable(vma, vmf->address, vmf->pte); in hugetlb_wp()
5846 PageAnonExclusive(&old_folio->page), &old_folio->page); in hugetlb_wp()
5867 spin_unlock(vmf->ptl); in hugetlb_wp()
5868 new_folio = alloc_hugetlb_folio(vma, vmf->address, outside_reserve); in hugetlb_wp()
5879 struct address_space *mapping = vma->vm_file->f_mapping; in hugetlb_wp()
5893 idx = vma_hugecache_offset(h, vma, vmf->address); in hugetlb_wp()
5898 unmap_ref_private(mm, vma, &old_folio->page, in hugetlb_wp()
5899 vmf->address); in hugetlb_wp()
5903 spin_lock(vmf->ptl); in hugetlb_wp()
5904 vmf->pte = hugetlb_walk(vma, vmf->address, in hugetlb_wp()
5906 if (likely(vmf->pte && in hugetlb_wp()
5907 pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) in hugetlb_wp()
5910 * race occurs while re-acquiring page table in hugetlb_wp()
5929 if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) { in hugetlb_wp()
5935 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address, in hugetlb_wp()
5936 vmf->address + huge_page_size(h)); in hugetlb_wp()
5943 spin_lock(vmf->ptl); in hugetlb_wp()
5944 vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h)); in hugetlb_wp()
5945 if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) { in hugetlb_wp()
5946 pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare); in hugetlb_wp()
5949 huge_ptep_clear_flush(vma, vmf->address, vmf->pte); in hugetlb_wp()
5951 hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address); in hugetlb_wp()
5954 set_huge_pte_at(mm, vmf->address, vmf->pte, newpte, in hugetlb_wp()
5960 spin_unlock(vmf->ptl); in hugetlb_wp()
5968 restore_reserve_on_error(h, vma, vmf->address, new_folio); in hugetlb_wp()
5973 spin_lock(vmf->ptl); /* Caller expects lock to be held */ in hugetlb_wp()
5985 struct address_space *mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_present()
5999 struct inode *inode = mapping->host; in hugetlb_add_to_page_cache()
6015 * by non-hugetlbfs specific code paths. in hugetlb_add_to_page_cache()
6019 spin_lock(&inode->i_lock); in hugetlb_add_to_page_cache()
6020 inode->i_blocks += blocks_per_huge_page(h); in hugetlb_add_to_page_cache()
6021 spin_unlock(&inode->i_lock); in hugetlb_add_to_page_cache()
6036 hugetlb_vma_unlock_read(vmf->vma); in hugetlb_handle_userfault()
6037 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); in hugetlb_handle_userfault()
6062 struct vm_area_struct *vma = vmf->vma; in hugetlb_no_page()
6063 struct mm_struct *mm = vma->vm_mm; in hugetlb_no_page()
6071 u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff); in hugetlb_no_page()
6081 current->pid); in hugetlb_no_page()
6090 folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff); in hugetlb_no_page()
6092 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
6093 if (vmf->pgoff >= size) in hugetlb_no_page()
6099 * without pgtable lock, we need to re-test under in hugetlb_no_page()
6102 * either changed or during-changing ptes and retry in hugetlb_no_page()
6114 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { in hugetlb_no_page()
6123 if (!(vma->vm_flags & VM_MAYSHARE)) { in hugetlb_no_page()
6129 folio = alloc_hugetlb_folio(vma, vmf->address, 0); in hugetlb_no_page()
6143 if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) in hugetlb_no_page()
6149 folio_zero_user(folio, vmf->real_address); in hugetlb_no_page()
6153 if (vma->vm_flags & VM_MAYSHARE) { in hugetlb_no_page()
6155 vmf->pgoff); in hugetlb_no_page()
6158 * err can't be -EEXIST which implies someone in hugetlb_no_page()
6164 restore_reserve_on_error(h, vma, vmf->address, in hugetlb_no_page()
6192 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { in hugetlb_no_page()
6207 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { in hugetlb_no_page()
6208 if (vma_needs_reservation(h, vma, vmf->address) < 0) { in hugetlb_no_page()
6213 vma_end_reservation(h, vma, vmf->address); in hugetlb_no_page()
6216 vmf->ptl = huge_pte_lock(h, mm, vmf->pte); in hugetlb_no_page()
6219 if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte)) in hugetlb_no_page()
6223 hugetlb_add_new_anon_rmap(folio, vma, vmf->address); in hugetlb_no_page()
6226 new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE) in hugetlb_no_page()
6227 && (vma->vm_flags & VM_SHARED))); in hugetlb_no_page()
6229 * If this pte was previously wr-protected, keep it wr-protected even in hugetlb_no_page()
6232 if (unlikely(pte_marker_uffd_wp(vmf->orig_pte))) in hugetlb_no_page()
6234 set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h)); in hugetlb_no_page()
6237 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { in hugetlb_no_page()
6242 spin_unlock(vmf->ptl); in hugetlb_no_page()
6257 * We must check to release the per-VMA lock. __vmf_anon_prepare() is in hugetlb_no_page()
6267 spin_unlock(vmf->ptl); in hugetlb_no_page()
6270 restore_reserve_on_error(h, vma, vmf->address, folio); in hugetlb_no_page()
6288 return hash & (num_fault_mutexes - 1); in hugetlb_fault_mutex_hash()
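The mask above works because num_fault_mutexes is a power of two, so "hash & (size - 1)" equals "hash % size". A two-line check with arbitrary values:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned long num_fault_mutexes = 256;  /* must be a power of two */
    unsigned long hash = 0xdeadbeefUL;

    assert((hash & (num_fault_mutexes - 1)) == (hash % num_fault_mutexes));
    printf("bucket = %lu\n", hash & (num_fault_mutexes - 1));   /* 239 */
    return 0;
}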
6331 mapping = vma->vm_file->f_mapping; in hugetlb_fault()
6409 !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) { in hugetlb_fault()
6429 /* Handle userfault-wp first, before trying to lock more pages */ in hugetlb_fault()
6490 * We must check to release the per-VMA lock. __vmf_anon_prepare() in in hugetlb_fault()
6528 * that breaking the per-node hugetlb pool is not allowed in this case. in alloc_hugetlb_folio_vma()
6547 struct mm_struct *dst_mm = dst_vma->vm_mm; in hugetlb_mfill_atomic_pte()
6551 struct address_space *mapping = dst_vma->vm_file->f_mapping; in hugetlb_mfill_atomic_pte()
6554 int vm_shared = dst_vma->vm_flags & VM_SHARED; in hugetlb_mfill_atomic_pte()
6557 int ret = -ENOMEM; in hugetlb_mfill_atomic_pte()
6568 return -EEXIST; in hugetlb_mfill_atomic_pte()
6574 /* No need to invalidate - it was non-present before */ in hugetlb_mfill_atomic_pte()
6582 ret = -EFAULT; in hugetlb_mfill_atomic_pte()
6589 * a non-missing case. Return -EEXIST. in hugetlb_mfill_atomic_pte()
6593 ret = -EEXIST; in hugetlb_mfill_atomic_pte()
6599 ret = -ENOMEM; in hugetlb_mfill_atomic_pte()
6608 ret = -ENOENT; in hugetlb_mfill_atomic_pte()
6620 ret = -ENOMEM; in hugetlb_mfill_atomic_pte()
6634 ret = -EEXIST; in hugetlb_mfill_atomic_pte()
6642 ret = -ENOMEM; in hugetlb_mfill_atomic_pte()
6673 ret = -EFAULT; in hugetlb_mfill_atomic_pte()
6674 if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h))) in hugetlb_mfill_atomic_pte()
6691 ret = -EIO; in hugetlb_mfill_atomic_pte()
6697 * registered, we firstly wr-protect a none pte which has no page cache in hugetlb_mfill_atomic_pte()
6700 ret = -EEXIST; in hugetlb_mfill_atomic_pte()
6710 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY in hugetlb_mfill_atomic_pte()
6716 writable = dst_vma->vm_flags & VM_WRITE; in hugetlb_mfill_atomic_pte()
6718 _dst_pte = make_huge_pte(dst_vma, &folio->page, writable); in hugetlb_mfill_atomic_pte()
6735 /* No need to invalidate - it was non-present before */ in hugetlb_mfill_atomic_pte()
6762 struct mm_struct *mm = vma->vm_mm; in hugetlb_change_protection()
6763 unsigned long start = address; in hugetlb_change_protection() local
6776 * start/end. Set range.start/range.end to cover the maximum possible in hugetlb_change_protection()
6780 0, mm, start, end); in hugetlb_change_protection()
6781 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); in hugetlb_change_protection()
6784 flush_cache_range(vma, range.start, range.end); in hugetlb_change_protection()
6788 i_mmap_lock_write(vma->vm_file->f_mapping); in hugetlb_change_protection()
6799 * Userfaultfd wr-protect requires pgtable in hugetlb_change_protection()
6800 * pre-allocations to install pte markers. in hugetlb_change_protection()
6804 pages = -ENOMEM; in hugetlb_change_protection()
6811 * When uffd-wp is enabled on the vma, unshare in hugetlb_change_protection()
6855 /* Safe to modify directly (non-present->none). */ in hugetlb_change_protection()
6863 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); in hugetlb_change_protection()
6873 /* Safe to modify directly (none->non-present). */ in hugetlb_change_protection()
6888 flush_hugetlb_tlb_range(vma, range.start, range.end); in hugetlb_change_protection()
6890 flush_hugetlb_tlb_range(vma, start, end); in hugetlb_change_protection()
6898 i_mmap_unlock_write(vma->vm_file->f_mapping); in hugetlb_change_protection()
6902 return pages > 0 ? (pages << h->order) : pages; in hugetlb_change_protection()
6911 long chg = -1, add = -1; in hugetlb_reserve_pages()
6941 * to reserve the full area even if read-only as mprotect() may be in hugetlb_reserve_pages()
6942 * called to make the mapping read-write. Assume !vma is a shm mapping in hugetlb_reserve_pages()
6944 if (!vma || vma->vm_flags & VM_MAYSHARE) { in hugetlb_reserve_pages()
6959 chg = to - from; in hugetlb_reserve_pages()
6972 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) { in hugetlb_reserve_pages()
7000 * the reservation was consumed. Private mappings are per-VMA and in hugetlb_reserve_pages()
7006 if (!vma || vma->vm_flags & VM_MAYSHARE) { in hugetlb_reserve_pages()
7010 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_reserve_pages()
7024 * reference to h_cg->css. See comment below for detail. in hugetlb_reserve_pages()
7028 (chg - add) * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
7031 chg - add); in hugetlb_reserve_pages()
7032 hugetlb_acct_memory(h, -rsv_adjust); in hugetlb_reserve_pages()
7036 * h_cg->css. So we should release the reference held in hugetlb_reserve_pages()
7053 if (!vma || vma->vm_flags & VM_MAYSHARE) in hugetlb_reserve_pages()
7060 kref_put(&resv_map->refs, resv_map_release); in hugetlb_reserve_pages()
7066 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, in hugetlb_unreserve_pages() argument
7080 chg = region_del(resv_map, start, end); in hugetlb_unreserve_pages()
7090 spin_lock(&inode->i_lock); in hugetlb_unreserve_pages()
7091 inode->i_blocks -= (blocks_per_huge_page(h) * freed); in hugetlb_unreserve_pages()
7092 spin_unlock(&inode->i_lock); in hugetlb_unreserve_pages()
7098 * Note that !resv_map implies freed == 0. So (chg - freed) in hugetlb_unreserve_pages()
7101 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); in hugetlb_unreserve_pages()
7102 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_unreserve_pages()
7112 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + in page_table_shareable()
7113 svma->vm_start; in page_table_shareable()
7118 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK; in page_table_shareable()
7119 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK; in page_table_shareable()
7130 !svma->vm_private_data) in page_table_shareable()
7138 unsigned long start = addr & PUD_MASK; in want_pmd_share() local
7139 unsigned long end = start + PUD_SIZE; in want_pmd_share()
7148 if (!(vma->vm_flags & VM_MAYSHARE)) in want_pmd_share()
7150 if (!vma->vm_private_data) /* vma lock required for sharing */ in want_pmd_share()
7152 if (!range_in_vma(vma, start, end)) in want_pmd_share()
7158 * Determine if start,end range within vma could be mapped by shared pmd.
7159 * If yes, adjust start and end to cover range associated with possible
7163 unsigned long *start, unsigned long *end) in adjust_range_if_pmd_sharing_possible() argument
7165 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE), in adjust_range_if_pmd_sharing_possible()
7166 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); in adjust_range_if_pmd_sharing_possible()
7172 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || in adjust_range_if_pmd_sharing_possible()
7173 (*end <= v_start) || (*start >= v_end)) in adjust_range_if_pmd_sharing_possible()
7177 if (*start > v_start) in adjust_range_if_pmd_sharing_possible()
7178 *start = ALIGN_DOWN(*start, PUD_SIZE); in adjust_range_if_pmd_sharing_possible()
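The PUD_SIZE rounding above widens a range so it covers whole shareable PMD regions. A standalone sketch of the same ALIGN/ALIGN_DOWN arithmetic, assuming a 1 GiB PUD_SIZE (x86-64); the macros are reimplemented locally for this build:

#include <stdio.h>

#define ALIGN_DOWN(x, a)    ((x) & ~((a) - 1))
#define ALIGN(x, a)         ALIGN_DOWN((x) + (a) - 1, (a))

int main(void)
{
    unsigned long PUD_SIZE = 1UL << 30;
    unsigned long start = 0x40080000UL;     /* some address inside a 1 GiB region */

    printf("ALIGN      = %#lx\n", ALIGN(start, PUD_SIZE));      /* 0x80000000 */
    printf("ALIGN_DOWN = %#lx\n", ALIGN_DOWN(start, PUD_SIZE)); /* 0x40000000 */
    return 0;
}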
7189 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
7196 struct address_space *mapping = vma->vm_file->f_mapping; in huge_pmd_share()
7197 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + in huge_pmd_share()
7198 vma->vm_pgoff; in huge_pmd_share()
7205 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { in huge_pmd_share()
7223 spin_lock(&mm->page_table_lock); in huge_pmd_share()
7231 spin_unlock(&mm->page_table_lock); in huge_pmd_share()
7257 i_mmap_assert_write_locked(vma->vm_file->f_mapping); in huge_pmd_unshare()
7284 unsigned long *start, unsigned long *end) in adjust_range_if_pmd_sharing_possible() argument
7330 * huge_pte_offset() - Walk the page table to resolve the hugepage
7355 /* must be pud huge, non-present or none */ in huge_pte_offset()
7362 /* must be pmd huge, non-present or none */ in huge_pte_offset()
7368 * page in a page table page mapping size. Used to skip non-present
7378 return P4D_SIZE - PUD_SIZE; in hugetlb_mask_last_page()
7380 return PUD_SIZE - PMD_SIZE; in hugetlb_mask_last_page()
7392 return PUD_SIZE - PMD_SIZE; in hugetlb_mask_last_page()
7411 list_move_tail(&folio->lru, list); in isolate_hugetlb()
7430 ret = -EBUSY; in get_hwpoison_hugetlb_folio()
7451 list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist); in folio_putback_active_hugetlb()
7461 set_page_owner_migrate_reason(&new_folio->page, reason); in move_hugetlb_state()
7469 * Also note that we have to transfer the per-node surplus state in move_hugetlb_state()
7471 * the per-node's. in move_hugetlb_state()
7482 * There is no need to transfer the per-node surplus state in move_hugetlb_state()
7488 if (h->surplus_huge_pages_node[old_nid]) { in move_hugetlb_state()
7489 h->surplus_huge_pages_node[old_nid]--; in move_hugetlb_state()
7490 h->surplus_huge_pages_node[new_nid]++; in move_hugetlb_state()
7497 unsigned long start, in hugetlb_unshare_pmds() argument
7502 struct mm_struct *mm = vma->vm_mm; in hugetlb_unshare_pmds()
7508 if (!(vma->vm_flags & VM_MAYSHARE)) in hugetlb_unshare_pmds()
7511 if (start >= end) in hugetlb_unshare_pmds()
7514 flush_cache_range(vma, start, end); in hugetlb_unshare_pmds()
7520 start, end); in hugetlb_unshare_pmds()
7523 i_mmap_lock_write(vma->vm_file->f_mapping); in hugetlb_unshare_pmds()
7524 for (address = start; address < end; address += PUD_SIZE) { in hugetlb_unshare_pmds()
7532 flush_hugetlb_tlb_range(vma, start, end); in hugetlb_unshare_pmds()
7533 i_mmap_unlock_write(vma->vm_file->f_mapping); in hugetlb_unshare_pmds()
7548 hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE), in hugetlb_unshare_all_pmds()
7549 ALIGN_DOWN(vma->vm_end, PUD_SIZE)); in hugetlb_unshare_all_pmds()
7594 void __init hugetlb_cma_reserve(int order) in hugetlb_cma_reserve() argument
7606 VM_WARN_ON(order <= MAX_PAGE_ORDER); in hugetlb_cma_reserve()
7618 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; in hugetlb_cma_reserve()
7623 if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) { in hugetlb_cma_reserve()
7625 nid, (PAGE_SIZE << order) / SZ_1M); in hugetlb_cma_reserve()
7626 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; in hugetlb_cma_reserve()
7637 if (hugetlb_cma_size < (PAGE_SIZE << order)) { in hugetlb_cma_reserve()
7639 (PAGE_SIZE << order) / SZ_1M); in hugetlb_cma_reserve()
7665 size = min(per_node, hugetlb_cma_size - reserved); in hugetlb_cma_reserve()
7668 size = round_up(size, PAGE_SIZE << order); in hugetlb_cma_reserve()
7672 * Note that 'order per bit' is based on smallest size that in hugetlb_cma_reserve()
7677 PAGE_SIZE << order, in hugetlb_cma_reserve()
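The CMA reservation is rounded up to a multiple of one huge page (PAGE_SIZE << order). A quick standalone check of that rounding, assuming 4 KiB pages and order 9 (2 MiB); note the kernel's round_up() additionally requires a power-of-two alignment, while this helper is general:

#include <stdio.h>

static unsigned long round_up_to(unsigned long x, unsigned long a)
{
    return (x + a - 1) / a * a;     /* 'a' need not be a power of two here */
}

int main(void)
{
    unsigned long page_size = 4096, order = 9;
    unsigned long huge = page_size << order;    /* 2 MiB */
    unsigned long size = 10 * 1024 * 1024 + 1;  /* just over 10 MiB requested */

    printf("reserved = %lu MiB\n", round_up_to(size, huge) >> 20);  /* 12 */
    return 0;
}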