Lines Matching +full:block +full:- +full:size

32 base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {  in base_map()  argument
38 assert(size == HUGEPAGE_CEILING(size)); in base_map()
41 addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit); in base_map()
46 addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment, in base_map()
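The two matched call sites above are the two mapping paths: the default path goes through jemalloc's internal mmap wrapper, while installed extent hooks take over the mapping completely. A minimal sketch of that branch, assuming POSIX mmap on Linux and a hypothetical alloc_hook_t callback (not jemalloc's extent_hooks_t); alignment handling is simplified, and the caller is assumed to have rounded size up to a huge page multiple, as the assert in the listing requires.

    #define _DEFAULT_SOURCE
    #include <stddef.h>
    #include <sys/mman.h>

    /* Hypothetical callback standing in for the alloc member of extent_hooks_t. */
    typedef void *(*alloc_hook_t)(size_t size, size_t alignment);

    /*
     * Branch the way base_map() does: with no custom hook installed, take the
     * default anonymous-mmap path; otherwise defer entirely to the hook.
     */
    static void *
    map_block(alloc_hook_t hook, size_t size, size_t alignment) {
        if (hook == NULL) {
            void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            return (addr == MAP_FAILED) ? NULL : addr;
        }
        return hook(size, alignment);
    }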
56 size_t size) { in base_unmap() argument
65 * in some consistent-but-allocated state. in base_unmap()
68 if (!extent_dalloc_mmap(addr, size)) { in base_unmap()
71 if (!pages_decommit(addr, size)) { in base_unmap()
74 if (!pages_purge_forced(addr, size)) { in base_unmap()
77 if (!pages_purge_lazy(addr, size)) { in base_unmap()
85 if (extent_hooks->dalloc != NULL && in base_unmap()
86 !extent_hooks->dalloc(extent_hooks, addr, size, true, in base_unmap()
90 if (extent_hooks->decommit != NULL && in base_unmap()
91 !extent_hooks->decommit(extent_hooks, addr, size, 0, size, in base_unmap()
95 if (extent_hooks->purge_forced != NULL && in base_unmap()
96 !extent_hooks->purge_forced(extent_hooks, addr, size, 0, in base_unmap()
97 size, ind)) { in base_unmap()
100 if (extent_hooks->purge_lazy != NULL && in base_unmap()
101 !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size, in base_unmap()
113 (size & HUGEPAGE_MASK) == 0); in base_unmap()
114 pages_nohuge(addr, size); in base_unmap()
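The chain of if (!...) checks above is a cascade: try to unmap outright, then decommit, then a forced purge, then a lazy purge, stopping at the first primitive that succeeds (these primitives return false on success). A rough, Linux-only sketch of the same fallback idea, assuming munmap and madvise stand in for the real primitives and skipping the decommit step.

    #define _DEFAULT_SOURCE
    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>

    /*
     * Rough Linux equivalents of the primitives matched above (an assumption;
     * the real ones are configure-time dependent).  Same convention as the
     * listing: false means success.
     */
    static bool try_unmap(void *addr, size_t size) {
        return munmap(addr, size) != 0;
    }
    static bool try_purge_forced(void *addr, size_t size) {
        return madvise(addr, size, MADV_DONTNEED) != 0;
    }
    static bool try_purge_lazy(void *addr, size_t size) {
    #ifdef MADV_FREE
        return madvise(addr, size, MADV_FREE) != 0;
    #else
        (void)addr; (void)size;
        return true;    /* not supported: report failure */
    #endif
    }

    /*
     * Cascade in the spirit of base_unmap(): prefer the strongest release and
     * fall back to weaker ones; if everything fails, the pages simply stay
     * mapped in a consistent-but-allocated state.
     */
    static void
    release_block(void *addr, size_t size) {
        if (!try_unmap(addr, size)) {
            return;
        }
        if (!try_purge_forced(addr, size)) {
            return;
        }
        (void)try_purge_lazy(addr, size);
    }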
120 size_t size) { in base_extent_init() argument
126 extent_binit(extent, addr, size, sn); in base_extent_init()
131 base_block_t *b = base->blocks; in base_get_num_blocks()
135 while (b->next != NULL) { in base_get_num_blocks()
137 b = b->next; in base_get_num_blocks()
146 malloc_mutex_assert_owner(tsdn, &base->mtx); in base_auto_thp_switch()
147 if (base->auto_thp_switched) { in base_auto_thp_switch()
150 /* Called when adding a new block. */ in base_auto_thp_switch()
163 base->auto_thp_switched = true; in base_auto_thp_switch()
164 assert(!config_stats || base->n_thp == 0); in base_auto_thp_switch()
166 base_block_t *block = base->blocks; in base_auto_thp_switch() local
167 while (block != NULL) { in base_auto_thp_switch()
168 assert((block->size & HUGEPAGE_MASK) == 0); in base_auto_thp_switch()
169 pages_huge(block, block->size); in base_auto_thp_switch()
171 base->n_thp += HUGEPAGE_CEILING(block->size - in base_auto_thp_switch()
172 extent_bsize_get(&block->extent)) >> LG_HUGEPAGE; in base_auto_thp_switch()
174 block = block->next; in base_auto_thp_switch()
175 assert(block == NULL || (base_ind_get(base) == 0)); in base_auto_thp_switch()
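The loop above walks every block the base already owns and switches it to transparent huge pages once the allocator decides THP is worthwhile, bumping the n_thp statistic as it goes. A simplified sketch of that walk, assuming 2 MiB huge pages and Linux madvise(MADV_HUGEPAGE); note the real code credits n_thp with only the already-consumed portion of each block, while this sketch counts whole blocks.

    #define _DEFAULT_SOURCE
    #include <assert.h>
    #include <stddef.h>
    #include <sys/mman.h>

    #define HUGEPAGE ((size_t)2 << 20)      /* assumption: 2 MiB huge pages */
    #define HUGEPAGE_MASK (HUGEPAGE - 1)

    /* Simplified stand-in for base_block_t: a huge-page-multiple mapping that
     * links to the next block in the base's list. */
    typedef struct block_s block_t;
    struct block_s {
        block_t *next;
        size_t   size;
    };

    /*
     * Advise the kernel to back every existing block with huge pages, the way
     * the loop above does once the THP switch flips, and return the number of
     * huge pages covered.
     */
    static size_t
    switch_blocks_to_thp(block_t *head) {
        size_t n_thp = 0;
        for (block_t *b = head; b != NULL; b = b->next) {
            assert((b->size & HUGEPAGE_MASK) == 0);
    #ifdef MADV_HUGEPAGE
            (void)madvise(b, b->size, MADV_HUGEPAGE);   /* pages_huge() */
    #endif
            n_thp += b->size / HUGEPAGE;
        }
        return n_thp;
    }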
180 base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size, in base_extent_bump_alloc_helper() argument
185 assert(size == ALIGNMENT_CEILING(size, alignment)); in base_extent_bump_alloc_helper()
188 alignment) - (uintptr_t)extent_addr_get(extent); in base_extent_bump_alloc_helper()
190 assert(extent_bsize_get(extent) >= *gap_size + size); in base_extent_bump_alloc_helper()
192 *gap_size + size), extent_bsize_get(extent) - *gap_size - size, in base_extent_bump_alloc_helper()
199 void *addr, size_t size) { in base_extent_bump_alloc_post() argument
202 * Compute the index for the largest size class that does not in base_extent_bump_alloc_post()
203 * exceed extent's size. in base_extent_bump_alloc_post()
206 sz_size2index(extent_bsize_get(extent) + 1) - 1; in base_extent_bump_alloc_post()
207 extent_heap_insert(&base->avail[index_floor], extent); in base_extent_bump_alloc_post()
211 base->allocated += size; in base_extent_bump_alloc_post()
217 base->resident += PAGE_CEILING((uintptr_t)addr + size) - in base_extent_bump_alloc_post()
218 PAGE_CEILING((uintptr_t)addr - gap_size); in base_extent_bump_alloc_post()
219 assert(base->allocated <= base->resident); in base_extent_bump_alloc_post()
220 assert(base->resident <= base->mapped); in base_extent_bump_alloc_post()
222 metadata_thp_always || base->auto_thp_switched)) { in base_extent_bump_alloc_post()
223 base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size) in base_extent_bump_alloc_post()
224 - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >> in base_extent_bump_alloc_post()
226 assert(base->mapped >= base->n_thp << LG_HUGEPAGE); in base_extent_bump_alloc_post()
232 base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size, in base_extent_bump_alloc() argument
237 ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment); in base_extent_bump_alloc()
238 base_extent_bump_alloc_post(base, extent, gap_size, ret, size); in base_extent_bump_alloc()
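base_extent_bump_alloc_helper above is a classic bump allocator over a single extent: skip forward to the requested alignment, hand out the aligned pointer, and shrink the extent to what is left; the _post step then re-files the remainder into the per-size-class avail heap and updates the allocated, resident, and n_thp statistics. A self-contained sketch of the helper's arithmetic, with span_t standing in for extent_t and alignment assumed to be a power of two.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Round x up to a multiple of alignment (a power of two); this is the job
     * ALIGNMENT_CEILING does in the listing. */
    static inline uintptr_t
    align_up(uintptr_t x, size_t alignment) {
        return (x + ((uintptr_t)alignment - 1)) & ~((uintptr_t)alignment - 1);
    }

    /* A trimmed-down extent: just a start address and a byte size. */
    typedef struct {
        uintptr_t addr;
        size_t    bsize;
    } span_t;

    /*
     * Bump-allocate size bytes from the front of span: skip an alignment gap,
     * hand out the aligned pointer, and shrink the span to the remainder,
     * mirroring the pointer arithmetic in base_extent_bump_alloc_helper().
     */
    static void *
    bump_alloc(span_t *span, size_t size, size_t alignment) {
        uintptr_t aligned = align_up(span->addr, alignment);
        size_t gap = (size_t)(aligned - span->addr);
        assert(span->bsize >= gap + size);
        span->addr   = aligned + size;
        span->bsize -= gap + size;
        return (void *)aligned;
    }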
243 * Allocate a block of virtual memory that is large enough to start with a
244 * base_block_t header, followed by an object of specified size and alignment.
249 unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size, in base_block_alloc() argument
252 size_t usize = ALIGNMENT_CEILING(size, alignment); in base_block_alloc()
254 size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) - in base_block_alloc()
258 * of disjoint virtual memory ranges. Choose the next size in the page in base_block_alloc()
259 * size class series (skipping size classes that are not a multiple of in base_block_alloc()
260 * HUGEPAGE), or a size large enough to satisfy the requested size and in base_block_alloc()
270 base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind, in base_block_alloc() local
272 if (block == NULL) { in base_block_alloc()
277 void *addr = (void *)block; in base_block_alloc()
285 malloc_mutex_lock(tsdn, &base->mtx); in base_block_alloc()
287 if (base->auto_thp_switched) { in base_block_alloc()
290 malloc_mutex_unlock(tsdn, &base->mtx); in base_block_alloc()
295 block->size = block_size; in base_block_alloc()
296 block->next = NULL; in base_block_alloc()
298 base_extent_init(extent_sn_next, &block->extent, in base_block_alloc()
299 (void *)((uintptr_t)block + header_size), block_size - header_size); in base_block_alloc()
300 return block; in base_block_alloc()
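The comment above states the block sizing policy: blocks get progressively larger so the number of disjoint mappings stays small, but never smaller than what the header, its alignment gap, and the rounded-up request need together. A toy version of that policy, assuming 2 MiB huge pages and simple doubling in place of the page size class series the real code walks.

    #include <stddef.h>

    #define HUGEPAGE ((size_t)2 << 20)      /* assumption: 2 MiB huge pages */

    static inline size_t
    huge_ceil(size_t s) {
        return (s + HUGEPAGE - 1) & ~(HUGEPAGE - 1);
    }

    /*
     * Pick the size of the next metadata block: grow over time (doubling here,
     * as a stand-in for stepping through the page size class series), but never
     * choose less than the header, its alignment gap, and the rounded-up
     * request require.
     */
    static size_t
    next_block_size(size_t prev_block_size, size_t header_size, size_t gap_size,
        size_t usize) {
        size_t min_size  = huge_ceil(header_size + gap_size + usize);
        size_t next_size = huge_ceil(prev_block_size * 2);
        return (min_size > next_size) ? min_size : next_size;
    }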
304 * Allocate an extent that is at least as large as specified size, with
308 base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { in base_extent_alloc() argument
309 malloc_mutex_assert_owner(tsdn, &base->mtx); in base_extent_alloc()
316 malloc_mutex_unlock(tsdn, &base->mtx); in base_extent_alloc()
317 base_block_t *block = base_block_alloc(tsdn, base, extent_hooks, in base_extent_alloc() local
318 base_ind_get(base), &base->pind_last, &base->extent_sn_next, size, in base_extent_alloc()
320 malloc_mutex_lock(tsdn, &base->mtx); in base_extent_alloc()
321 if (block == NULL) { in base_extent_alloc()
324 block->next = base->blocks; in base_extent_alloc()
325 base->blocks = block; in base_extent_alloc()
327 base->allocated += sizeof(base_block_t); in base_extent_alloc()
328 base->resident += PAGE_CEILING(sizeof(base_block_t)); in base_extent_alloc()
329 base->mapped += block->size; in base_extent_alloc()
332 && !base->auto_thp_switched)) { in base_extent_alloc()
333 assert(base->n_thp > 0); in base_extent_alloc()
334 base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >> in base_extent_alloc()
337 assert(base->allocated <= base->resident); in base_extent_alloc()
338 assert(base->resident <= base->mapped); in base_extent_alloc()
339 assert(base->n_thp << LG_HUGEPAGE <= base->mapped); in base_extent_alloc()
341 return &block->extent; in base_extent_alloc()
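base_extent_alloc above drops the base mutex around the block allocation (mapping can be slow and may re-enter user hooks), then re-takes it to push the new block onto the blocks list and update the stats the mutex protects. A sketch of that shape using pthreads, with alloc_block() as a hypothetical stand-in for base_block_alloc (malloc here, a huge-page mapping in the real thing).

    #include <pthread.h>
    #include <stddef.h>
    #include <stdlib.h>

    typedef struct block_s block_t;
    struct block_s {
        block_t *next;
        size_t   size;
    };

    typedef struct {
        pthread_mutex_t mtx;
        block_t        *blocks;     /* singly linked list of metadata blocks */
        size_t          mapped;     /* stats protected by mtx */
    } base_like_t;

    /* Hypothetical stand-in for base_block_alloc(). */
    static block_t *
    alloc_block(size_t size) {
        block_t *b = malloc(size);      /* size >= sizeof(block_t) assumed */
        if (b != NULL) {
            b->next = NULL;
            b->size = size;
        }
        return b;
    }

    /*
     * Same shape as base_extent_alloc(): the caller holds the lock; drop it
     * around the slow mapping call, re-take it, then link the block in and
     * update the stats the lock protects.
     */
    static block_t *
    grow_base(base_like_t *base, size_t size) {
        pthread_mutex_unlock(&base->mtx);
        block_t *block = alloc_block(size);
        pthread_mutex_lock(&base->mtx);
        if (block == NULL) {
            return NULL;
        }
        block->next   = base->blocks;
        base->blocks  = block;
        base->mapped += block->size;
        return block;
    }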
353 base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind, in base_new() local
355 if (block == NULL) { in base_new()
362 base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent, in base_new()
364 base->ind = ind; in base_new()
365 atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED); in base_new()
366 if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE, in base_new()
368 base_unmap(tsdn, extent_hooks, ind, block, block->size); in base_new()
371 base->pind_last = pind_last; in base_new()
372 base->extent_sn_next = extent_sn_next; in base_new()
373 base->blocks = block; in base_new()
374 base->auto_thp_switched = false; in base_new()
376 extent_heap_new(&base->avail[i]); in base_new()
379 base->allocated = sizeof(base_block_t); in base_new()
380 base->resident = PAGE_CEILING(sizeof(base_block_t)); in base_new()
381 base->mapped = block->size; in base_new()
382 base->n_thp = (opt_metadata_thp == metadata_thp_always) && in base_new()
385 assert(base->allocated <= base->resident); in base_new()
386 assert(base->resident <= base->mapped); in base_new()
387 assert(base->n_thp << LG_HUGEPAGE <= base->mapped); in base_new()
389 base_extent_bump_alloc_post(base, &block->extent, gap_size, base, in base_new()
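base_new is the bootstrap: the first block is mapped with base == NULL, and the base_t descriptor itself is then bump-allocated out of that block's own extent, so the metadata allocator never needs another allocator to describe itself. A minimal illustration of that self-hosting trick, with malloc in place of the huge-page mapping.

    #include <stddef.h>
    #include <stdlib.h>

    /* Self-hosting bootstrap in the spirit of base_new(): map one chunk, then
     * carve the allocator's own descriptor out of the front of that chunk. */
    typedef struct {
        void  *chunk;       /* the first (and, here, only) block */
        size_t chunk_size;
        size_t used;        /* bump offset into the chunk */
    } mini_base_t;

    static mini_base_t *
    mini_base_new(size_t chunk_size) {
        if (chunk_size < sizeof(mini_base_t)) {
            return NULL;
        }
        void *chunk = malloc(chunk_size);   /* real code: huge-page mmap */
        if (chunk == NULL) {
            return NULL;
        }
        /* The descriptor is the first object carved out of its own chunk. */
        mini_base_t *base = (mini_base_t *)chunk;
        base->chunk = chunk;
        base->chunk_size = chunk_size;
        base->used = sizeof(mini_base_t);
        return base;
    }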
398 base_block_t *next = base->blocks; in base_delete()
400 base_block_t *block = next; in base_delete() local
401 next = block->next; in base_delete()
402 base_unmap(tsdn, extent_hooks, base_ind_get(base), block, in base_delete()
403 block->size); in base_delete()
409 return (extent_hooks_t *)atomic_load_p(&base->extent_hooks, in base_extent_hooks_get()
416 atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE); in base_extent_hooks_set()
421 base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment, in base_alloc_impl() argument
424 size_t usize = ALIGNMENT_CEILING(size, alignment); in base_alloc_impl()
425 size_t asize = usize + alignment - QUANTUM; in base_alloc_impl()
428 malloc_mutex_lock(tsdn, &base->mtx); in base_alloc_impl()
430 extent = extent_heap_remove_first(&base->avail[i]); in base_alloc_impl()
451 malloc_mutex_unlock(tsdn, &base->mtx); in base_alloc_impl()
456 * base_alloc() returns zeroed memory, which is always demand-zeroed for the
457 * auto arenas, in order to make multi-page sparse data structures such as radix
459 * pointer to at least size bytes with specified alignment is returned. Note
460 * that size is rounded up to the nearest multiple of alignment to avoid false
464 base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { in base_alloc() argument
465 return base_alloc_impl(tsdn, base, size, alignment, NULL); in base_alloc()
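base_alloc_impl rounds the request up to a multiple of the alignment (usize), then searches with a padded size (asize adds alignment minus QUANTUM) so that whatever span it pulls from the avail heaps can absorb the worst-case alignment gap; the search walks size classes upward from the smallest class that could fit and falls back to base_extent_alloc when every heap is empty. A sketch of that upward scan over per-class free lists, with a toy size_to_class() in place of sz_size2index() and linked lists in place of extent heaps.

    #include <stddef.h>

    #define NCLASSES 8      /* assumption: a small, fixed set of size classes */

    /* One free list per size class; the real code keeps extent heaps instead. */
    typedef struct node_s node_t;
    struct node_s {
        node_t *next;
        size_t  size;
    };

    static node_t *avail[NCLASSES];

    /* Toy size-to-class mapping (not sz_size2index()): class i is guaranteed
     * to hold spans of at least (i + 1) * 256 bytes. */
    static size_t
    size_to_class(size_t size) {
        return (size == 0) ? 0 : (size - 1) / 256;  /* may be >= NCLASSES */
    }

    /*
     * Scan upward from the smallest class that can satisfy asize, popping the
     * first non-empty list; a NULL return tells the caller to map a new block,
     * just as base_alloc_impl() falls back to base_extent_alloc().
     */
    static node_t *
    find_free_span(size_t asize) {
        for (size_t i = size_to_class(asize); i < NCLASSES; i++) {
            if (avail[i] != NULL) {
                node_t *n = avail[i];
                avail[i] = n->next;
                return n;
            }
        }
        return NULL;
    }

The span that comes back is then bump-allocated from, which is why the search pads the request (asize) rather than using size directly.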
485 malloc_mutex_lock(tsdn, &base->mtx); in base_stats_get()
486 assert(base->allocated <= base->resident); in base_stats_get()
487 assert(base->resident <= base->mapped); in base_stats_get()
488 *allocated = base->allocated; in base_stats_get()
489 *resident = base->resident; in base_stats_get()
490 *mapped = base->mapped; in base_stats_get()
491 *n_thp = base->n_thp; in base_stats_get()
492 malloc_mutex_unlock(tsdn, &base->mtx); in base_stats_get()
497 malloc_mutex_prefork(tsdn, &base->mtx); in base_prefork()
502 malloc_mutex_postfork_parent(tsdn, &base->mtx); in base_postfork_parent()
507 malloc_mutex_postfork_child(tsdn, &base->mtx); in base_postfork_child()