Lines matching refs: spool (hugepage subpool accounting, mm/hugetlb.c)
105 static inline bool subpool_is_free(struct hugepage_subpool *spool) in subpool_is_free() argument
107 if (spool->count) in subpool_is_free()
109 if (spool->max_hpages != -1) in subpool_is_free()
110 return spool->used_hpages == 0; in subpool_is_free()
111 if (spool->min_hpages != -1) in subpool_is_free()
112 return spool->rsv_hpages == spool->min_hpages; in subpool_is_free()
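
Read together, the refs at lines 105-112 describe a short predicate: a subpool counts as free only when no handles to it remain and its page accounting is back at its initial state. A minimal reconstruction follows; the early return false and the trailing return true are not visible in the listing and are filled in as assumptions (details may differ between kernel versions).

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;				/* still referenced */
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;		/* max accounting drained */
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;	/* reserves restored */

	return true;
}
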
117 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool, in unlock_or_release_subpool() argument
120 spin_unlock_irqrestore(&spool->lock, irq_flags); in unlock_or_release_subpool()
125 if (subpool_is_free(spool)) { in unlock_or_release_subpool()
126 if (spool->min_hpages != -1) in unlock_or_release_subpool()
127 hugetlb_acct_memory(spool->hstate, in unlock_or_release_subpool()
128 -spool->min_hpages); in unlock_or_release_subpool()
129 kfree(spool); in unlock_or_release_subpool()
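
Lines 117-129 show the unlock-then-maybe-free pattern: the lock is dropped with the caller's saved irq_flags, and only if subpool_is_free() reports no remaining users are the minimum-size reservations handed back to the global pool (hugetlb_acct_memory() with a negative delta) and the structure freed. A hedged reconstruction, with the omitted braces and comments assumed:

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
					     unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/* no users and no pages outstanding: give back min reserves, free */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
					    -spool->min_hpages);
		kfree(spool);
	}
}

Freeing after the unlock is only safe because subpool_is_free() implies spool->count is zero, so no other path can still reach the lock.
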
136 struct hugepage_subpool *spool; in hugepage_new_subpool() local
138 spool = kzalloc(sizeof(*spool), GFP_KERNEL); in hugepage_new_subpool()
139 if (!spool) in hugepage_new_subpool()
142 spin_lock_init(&spool->lock); in hugepage_new_subpool()
143 spool->count = 1; in hugepage_new_subpool()
144 spool->max_hpages = max_hpages; in hugepage_new_subpool()
145 spool->hstate = h; in hugepage_new_subpool()
146 spool->min_hpages = min_hpages; in hugepage_new_subpool()
149 kfree(spool); in hugepage_new_subpool()
152 spool->rsv_hpages = min_hpages; in hugepage_new_subpool()
154 return spool; in hugepage_new_subpool()
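
The constructor (lines 136-154) allocates the subpool with kzalloc(), initialises the lock, the creator's reference and the two limits, and finally seeds rsv_hpages from min_hpages. The kfree() on line 149 sits between those steps, which points to an error path; in current kernels that is the case where a minimum size was requested but hugetlb_acct_memory(h, min_hpages) cannot back it. Reconstructed with that assumption (the return statements, which the listing omits, are assumed as well):

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
					      long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;			/* creator holds one reference */
	spool->max_hpages = max_hpages;		/* -1 == no maximum */
	spool->hstate = h;
	spool->min_hpages = min_hpages;		/* -1 == no minimum */

	/* assumed error path around line 149: can't back the minimum size */
	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}
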
157 void hugepage_put_subpool(struct hugepage_subpool *spool) in hugepage_put_subpool() argument
161 spin_lock_irqsave(&spool->lock, flags); in hugepage_put_subpool()
162 BUG_ON(!spool->count); in hugepage_put_subpool()
163 spool->count--; in hugepage_put_subpool()
164 unlock_or_release_subpool(spool, flags); in hugepage_put_subpool()
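
hugepage_put_subpool() (lines 157-164) is the reference drop: take the lock with irqsave, sanity-check that the count is still positive, decrement it, and let unlock_or_release_subpool() decide whether this was the last user and the subpool can be freed. Reconstruction:

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);	/* may kfree(spool) */
}
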
175 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool, in hugepage_subpool_get_pages() argument
180 if (!spool) in hugepage_subpool_get_pages()
183 spin_lock_irq(&spool->lock); in hugepage_subpool_get_pages()
185 if (spool->max_hpages != -1) { /* maximum size accounting */ in hugepage_subpool_get_pages()
186 if ((spool->used_hpages + delta) <= spool->max_hpages) in hugepage_subpool_get_pages()
187 spool->used_hpages += delta; in hugepage_subpool_get_pages()
195 if (spool->min_hpages != -1 && spool->rsv_hpages) { in hugepage_subpool_get_pages()
196 if (delta > spool->rsv_hpages) { in hugepage_subpool_get_pages()
201 ret = delta - spool->rsv_hpages; in hugepage_subpool_get_pages()
202 spool->rsv_hpages = 0; in hugepage_subpool_get_pages()
205 spool->rsv_hpages -= delta; in hugepage_subpool_get_pages()
210 spin_unlock_irq(&spool->lock); in hugepage_subpool_get_pages()
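
Lines 175-210 are the charging side. The fragments show two independent accounting modes: if a maximum size is set, delta is added to used_hpages only while it stays within max_hpages; if a minimum size is set, the pre-reserved rsv_hpages are consumed first. The return value (not visible in the listing) is, in current kernels, the number of pages the caller must still charge against the global reservation pool: 0 when the subpool's own reserves cover the request, delta when there is no reserve to draw on, and a negative errno when the maximum would be exceeded. Reconstructed under those assumptions:

static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;			/* no subpool: charge globals fully */

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;		/* assumed overflow branch */
			goto unlock_ret;
		}
	}

	/* minimum size accounting: consume pre-reserved pages first */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/* only part of the request is covered by reserves */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;		/* fully covered by reserves */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}
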
220 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool, in hugepage_subpool_put_pages() argument
226 if (!spool) in hugepage_subpool_put_pages()
229 spin_lock_irqsave(&spool->lock, flags); in hugepage_subpool_put_pages()
231 if (spool->max_hpages != -1) /* maximum size accounting */ in hugepage_subpool_put_pages()
232 spool->used_hpages -= delta; in hugepage_subpool_put_pages()
235 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) { in hugepage_subpool_put_pages()
236 if (spool->rsv_hpages + delta <= spool->min_hpages) in hugepage_subpool_put_pages()
239 ret = spool->rsv_hpages + delta - spool->min_hpages; in hugepage_subpool_put_pages()
241 spool->rsv_hpages += delta; in hugepage_subpool_put_pages()
242 if (spool->rsv_hpages > spool->min_hpages) in hugepage_subpool_put_pages()
243 spool->rsv_hpages = spool->min_hpages; in hugepage_subpool_put_pages()
250 unlock_or_release_subpool(spool, flags); in hugepage_subpool_put_pages()
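
Lines 220-250 are the mirror image. delta pages are uncharged from used_hpages, and if the subpool has a minimum size they are first used to top rsv_hpages back up toward min_hpages; only the surplus is reported back as the number of global reservations that may now be released. The lock is taken with irqsave (unlike the get side) because this path can be reached from free_huge_folio(), and it finishes through unlock_or_release_subpool() so the final put can also free the subpool. Reconstruction, with the returns assumed:

static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting: refill reserves before releasing globals */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;		/* everything refills the reserve */
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/* may free the subpool if this was the last user */
	unlock_or_release_subpool(spool, flags);

	return ret;
}
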
257 return HUGETLBFS_SB(inode->i_sb)->spool; in subpool_inode()
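
Line 257 is the inode-side accessor. The callers below also use a subpool_vma() helper (lines 2983 and 5088) whose own lines do not reference the spool identifier and so are absent from this listing; presumably it just maps from the VMA's backing file to the same per-superblock subpool. Both are sketched here, the second as an assumption:

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

/* assumed shape of the VMA-side helper used at lines 2983 and 5088 */
static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}
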
939 struct hugepage_subpool *spool = subpool_inode(inode); in hugetlb_fix_reserve_counts() local
943 rsv_adjust = hugepage_subpool_get_pages(spool, 1); in hugetlb_fix_reserve_counts()
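
hugetlb_fix_reserve_counts() (lines 939, 943) uses the get side for error fix-up: when a reservation entry could not be removed, the leaked reservation is re-charged to the subpool and, if the subpool passes the charge through, to the global pool, with only a warning if neither charge succeeds. The reconstruction below follows the current upstream shape of the function and is hedged accordingly:

void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		/* the subpool passed the charge through; take it globally */
		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}
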
1819 struct hugepage_subpool *spool = hugetlb_folio_subpool(folio); in free_huge_folio() local
1848 if (hugepage_subpool_put_pages(spool, 1) == 0) in free_huge_folio()
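
In the freeing path (lines 1819, 1848) the return value of hugepage_subpool_put_pages() is read the other way around: a return of 0 means the subpool kept the page to stay at its minimum size, so the global reservation should be treated as restored rather than released. A fragmentary sketch; restore_reserve is a local flag in the elided surrounding free_huge_folio() body:

	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);

	/* ... freeing and hstate accounting elided ... */

	/*
	 * 0 from put_pages() means the page was absorbed into the subpool's
	 * minimum-size reserve, so also restore the global reservation.
	 */
	if (hugepage_subpool_put_pages(spool, 1) == 0)
		restore_reserve = true;
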
2983 struct hugepage_subpool *spool = subpool_vma(vma); in alloc_hugetlb_folio() local
3023 gbl_chg = hugepage_subpool_get_pages(spool, 1); in alloc_hugetlb_folio()
3087 hugetlb_set_folio_subpool(folio, spool); in alloc_hugetlb_folio()
3105 rsv_adjust = hugepage_subpool_put_pages(spool, 1); in alloc_hugetlb_folio()
3140 hugepage_subpool_put_pages(spool, 1); in alloc_hugetlb_folio()
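
alloc_hugetlb_folio() (lines 2983-3140) shows the full charge/commit/rollback cycle around a single allocation: charge one page to the subpool up front (gbl_chg then being what must also be charged globally), record the owning subpool in the folio so free_huge_folio() can uncharge it later, hand back a surplus charge if the reservation map changed underneath (the rsv_adjust path at line 3105), and return the charge entirely if the allocation fails (line 3140). The sketch below compresses these into one sequence purely for illustration; in the real function the initial charge is conditional and the last two blocks are alternative paths, not consecutive statements:

	struct hugepage_subpool *spool = subpool_vma(vma);
	long gbl_chg, rsv_adjust;

	/* >0: charge that many pages globally too; <0: subpool max exceeded */
	gbl_chg = hugepage_subpool_get_pages(spool, 1);
	if (gbl_chg < 0)
		return ERR_PTR(-ENOSPC);	/* assumed error handling */

	/* ... dequeue or allocate the folio (elided) ... */

	/* tie the folio to its subpool for the eventual free_huge_folio() */
	hugetlb_set_folio_subpool(folio, spool);

	/* line 3105: a racing reservation removal made one charge surplus */
	rsv_adjust = hugepage_subpool_put_pages(spool, 1);
	hugetlb_acct_memory(h, -rsv_adjust);

	/* line 3140: error unwind simply returns the charge to the subpool */
	hugepage_subpool_put_pages(spool, 1);
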
5088 struct hugepage_subpool *spool = subpool_vma(vma); in hugetlb_vm_op_close() local
5108 gbl_reserve = hugepage_subpool_put_pages(spool, reserve); in hugetlb_vm_op_close()
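
hugetlb_vm_op_close() (lines 5088, 5108) returns the reservations a private mapping never consumed when the VMA goes away. The subpool keeps whatever it needs to stay at its minimum size; only the remainder, gbl_reserve, is released from the global pool. In this sketch, reserve and h come from the elided surrounding code:

	struct hugepage_subpool *spool = subpool_vma(vma);
	long gbl_reserve;

	if (reserve) {
		/* the subpool may keep part of 'reserve' for its minimum size */
		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
		hugetlb_acct_memory(h, -gbl_reserve);
	}
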
6950 struct hugepage_subpool *spool = subpool_inode(inode); in hugetlb_reserve_pages() local
7021 gbl_reserve = hugepage_subpool_get_pages(spool, chg); in hugetlb_reserve_pages()
7067 rsv_adjust = hugepage_subpool_put_pages(spool, in hugetlb_reserve_pages()
7084 (void)hugepage_subpool_put_pages(spool, chg); in hugetlb_reserve_pages()
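
hugetlb_reserve_pages() (lines 6950-7084) is the mmap-time charging path and uses both directions: the whole requested range chg is charged to the subpool, only the resulting gbl_reserve is charged to the global pool, any surplus is handed back if fewer pages were ultimately added to the reservation map, and the failure path returns the whole charge. The second argument of the call on line 7067 continues on the next source line and is not shown in the listing; the placeholder surplus stands in for it here. Labels and surrounding logic are likewise assumptions:

	struct hugepage_subpool *spool = subpool_inode(inode);
	long gbl_reserve, rsv_adjust;

	/* charge the whole range to the subpool first */
	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
	if (gbl_reserve < 0)
		goto out_uncharge_cgroup;		/* assumed label */

	/* only the part not covered by subpool reserves hits the globals */
	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
		goto out_put_pages;			/* assumed label */

	/* ... region_add() etc. elided; hand back any over-charge ... */
	rsv_adjust = hugepage_subpool_put_pages(spool, surplus);
	hugetlb_acct_memory(h, -rsv_adjust);		/* 'surplus' is a placeholder
							   for the elided argument */

	/* line 7084: failure unwind for the subpool charge */
out_put_pages:
	(void)hugepage_subpool_put_pages(spool, chg);
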
7109 struct hugepage_subpool *spool = subpool_inode(inode); in hugetlb_unreserve_pages() local
7138 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); in hugetlb_unreserve_pages()
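
hugetlb_unreserve_pages() (lines 7109, 7138) is the truncate/hole-punch counterpart: reservations that were removed from the region map but never materialised as pages (chg - freed) go back to the subpool, and only what the subpool does not keep for its minimum size is released from the global pool:

	struct hugepage_subpool *spool = subpool_inode(inode);
	long gbl_reserve;

	/* chg: reservations removed from the map; freed: pages actually freed */
	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -gbl_reserve);
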