/* xref: /freebsd/contrib/jemalloc/src/base.c (revision 63a938566d524836885917d95bd491aa4400b181) */
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"

/******************************************************************************/
/* Data. */

static base_t	*b0;

/******************************************************************************/

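/*
 * Map a block of virtual memory of the given size (a multiple of HUGEPAGE),
 * either directly via mmap for the default extent hooks or through the custom
 * alloc hook otherwise.
 */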
static void *
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
	void *addr;
	bool zero = true;
	bool commit = true;

	assert(size == HUGEPAGE_CEILING(size));

	if (extent_hooks == &extent_hooks_default) {
		addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
	} else {
		/* No arena context as we are creating new arenas. */
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
		    &zero, &commit, ind);
		post_reentrancy(tsd);
	}

	return addr;
}

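/*
 * Return a block obtained via base_map() to the system, falling back to
 * progressively weaker operations when stronger ones are unsupported.
 */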
static void
base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
    size_t size) {
	/*
	 * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
	 * stopping at first success.  This cascade is performed for consistency
	 * with the cascade in extent_dalloc_wrapper() because an application's
	 * custom hooks may not support e.g. dalloc.  This function is only ever
	 * called as a side effect of arena destruction, so although it might
	 * seem pointless to do anything besides dalloc here, the application
	 * may in fact want the end state of all associated virtual memory to be
	 * in some consistent-but-allocated state.
	 */
	if (extent_hooks == &extent_hooks_default) {
		if (!extent_dalloc_mmap(addr, size)) {
			return;
		}
		if (!pages_decommit(addr, size)) {
			return;
		}
		if (!pages_purge_forced(addr, size)) {
			return;
		}
		if (!pages_purge_lazy(addr, size)) {
			return;
		}
		/* Nothing worked.  This should never happen. */
		not_reached();
	} else {
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		if (extent_hooks->dalloc != NULL &&
		    !extent_hooks->dalloc(extent_hooks, addr, size, true,
		    ind)) {
			goto label_done;
		}
		if (extent_hooks->decommit != NULL &&
		    !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_done;
		}
		if (extent_hooks->purge_forced != NULL &&
		    !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
		    size, ind)) {
			goto label_done;
		}
		if (extent_hooks->purge_lazy != NULL &&
		    !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_done;
		}
		/* Nothing worked.  That's the application's problem. */
	label_done:
		post_reentrancy(tsd);
		return;
	}
}

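/* Initialize an extent over [addr, addr+size) with the next serial number. */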
static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
    size_t size) {
	size_t sn;

	sn = *extent_sn_next;
	(*extent_sn_next)++;

	extent_binit(extent, addr, size, sn);
}

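/*
 * Carve an aligned region of the requested size off the front of extent,
 * reporting any alignment gap via *gap_size and shrinking the extent in place
 * to cover only the remaining trailing space.
 */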
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
    size_t alignment) {
	void *ret;

	assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
	assert(size == ALIGNMENT_CEILING(size, alignment));

	*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
	    alignment) - (uintptr_t)extent_addr_get(extent);
	ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
	assert(extent_bsize_get(extent) >= *gap_size + size);
	extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
	    *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
	    extent_sn_get(extent));
	return ret;
}

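/*
 * Post-allocation bookkeeping: reinsert the extent's remainder into the
 * appropriate availability heap and update the allocation statistics.
 */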
static void
base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
    size_t gap_size, void *addr, size_t size) {
	if (extent_bsize_get(extent) > 0) {
		/*
		 * Compute the index for the largest size class that does not
		 * exceed extent's size.
		 */
		szind_t index_floor =
		    sz_size2index(extent_bsize_get(extent) + 1) - 1;
		extent_heap_insert(&base->avail[index_floor], extent);
	}

	if (config_stats) {
		base->allocated += size;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation.
		 */
		base->resident += PAGE_CEILING((uintptr_t)addr + size) -
		    PAGE_CEILING((uintptr_t)addr - gap_size);
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
	}
}

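/* Allocate size bytes with the given alignment from extent. */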
static void *
base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
    size_t size, size_t alignment) {
	void *ret;
	size_t gap_size;

	ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
	base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size);
	return ret;
}

/*
 * Allocate a block of virtual memory that is large enough to start with a
 * base_block_t header, followed by an object of specified size and alignment.
 * On success a pointer to the initialized base_block_t header is returned.
 */
static base_block_t *
base_block_alloc(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind,
    pszind_t *pind_last, size_t *extent_sn_next, size_t size,
    size_t alignment) {
	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t header_size = sizeof(base_block_t);
	size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
	    header_size;
	/*
	 * Create increasingly larger blocks in order to limit the total number
	 * of disjoint virtual memory ranges.  Choose the next size in the page
	 * size class series (skipping size classes that are not a multiple of
	 * HUGEPAGE), or a size large enough to satisfy the requested size and
	 * alignment, whichever is larger.
	 */
	size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
	    + usize));
	pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
	    *pind_last;
	size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
	size_t block_size = (min_block_size > next_block_size) ? min_block_size
	    : next_block_size;
	base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
	    block_size);
	if (block == NULL) {
		return NULL;
	}
	*pind_last = sz_psz2ind(block_size);
	block->size = block_size;
	block->next = NULL;
	assert(block_size >= header_size);
	base_extent_init(extent_sn_next, &block->extent,
	    (void *)((uintptr_t)block + header_size), block_size - header_size);
	return block;
}

/*
 * Allocate an extent that is at least as large as specified size, with
 * specified alignment.
 */
static extent_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &base->mtx);

	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	/*
	 * Drop mutex during base_block_alloc(), because an extent hook will be
	 * called.
	 */
	malloc_mutex_unlock(tsdn, &base->mtx);
	base_block_t *block = base_block_alloc(tsdn, extent_hooks,
	    base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
	    alignment);
	malloc_mutex_lock(tsdn, &base->mtx);
	if (block == NULL) {
		return NULL;
	}
	block->next = base->blocks;
	base->blocks = block;
	if (config_stats) {
		base->allocated += sizeof(base_block_t);
		base->resident += PAGE_CEILING(sizeof(base_block_t));
		base->mapped += block->size;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
	}
	return &block->extent;
}

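/* Return the global base allocator, created during bootstrap. */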
base_t *
b0get(void) {
	return b0;
}

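/*
 * Create a base allocator, bootstrapping the base_t itself out of the first
 * block mapped via the provided extent hooks.
 */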
base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	pszind_t pind_last = 0;
	size_t extent_sn_next = 0;
	base_block_t *block = base_block_alloc(tsdn, extent_hooks, ind,
	    &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
	if (block == NULL) {
		return NULL;
	}

	size_t gap_size;
	size_t base_alignment = CACHELINE;
	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
	    &gap_size, base_size, base_alignment);
	base->ind = ind;
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
	    malloc_mutex_rank_exclusive)) {
		base_unmap(tsdn, extent_hooks, ind, block, block->size);
		return NULL;
	}
	base->pind_last = pind_last;
	base->extent_sn_next = extent_sn_next;
	base->blocks = block;
	for (szind_t i = 0; i < NSIZES; i++) {
		extent_heap_new(&base->avail[i]);
	}
	if (config_stats) {
		base->allocated = sizeof(base_block_t);
		base->resident = PAGE_CEILING(sizeof(base_block_t));
		base->mapped = block->size;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
	}
	base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base,
	    base_size);

	return base;
}

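/*
 * Destroy a base allocator, unmapping every block it owns (including the
 * block that contains the base_t itself).
 */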
void
base_delete(tsdn_t *tsdn, base_t *base) {
	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	base_block_t *next = base->blocks;
	do {
		base_block_t *block = next;
		next = block->next;
		base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
		    block->size);
	} while (next != NULL);
}

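/* Read the current extent hooks with acquire semantics. */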
extent_hooks_t *
base_extent_hooks_get(base_t *base) {
	return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
	    ATOMIC_ACQUIRE);
}

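/* Install new extent hooks and return the previous ones. */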
extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
	extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
	return old_extent_hooks;
}

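/*
 * Common allocation path: round the request up to quantum alignment, reuse an
 * extent from the availability heaps that is large enough to satisfy it, or
 * allocate a new block if none is.  Optionally report the extent serial
 * number via *esn.
 */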
static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
    size_t *esn) {
	alignment = QUANTUM_CEILING(alignment);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t asize = usize + alignment - QUANTUM;

	extent_t *extent = NULL;
	malloc_mutex_lock(tsdn, &base->mtx);
	for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
		extent = extent_heap_remove_first(&base->avail[i]);
		if (extent != NULL) {
			/* Use existing space. */
			break;
		}
	}
	if (extent == NULL) {
		/* Try to allocate more space. */
		extent = base_extent_alloc(tsdn, base, usize, alignment);
	}
	void *ret;
	if (extent == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
	if (esn != NULL) {
		*esn = extent_sn_get(extent);
	}
label_return:
	malloc_mutex_unlock(tsdn, &base->mtx);
	return ret;
}

/*
 * base_alloc() returns zeroed memory, which is always demand-zeroed for the
 * auto arenas, in order to make multi-page sparse data structures such as radix
 * tree nodes efficient with respect to physical memory usage.  Upon success a
 * pointer to at least size bytes with specified alignment is returned.  Note
 * that size is rounded up to the nearest multiple of alignment to avoid false
 * sharing.
 */
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	return base_alloc_impl(tsdn, base, size, alignment, NULL);
}

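/* Allocate an extent_t from base, recording its extent serial number. */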
extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
	size_t esn;
	extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
	    CACHELINE, &esn);
	if (extent == NULL) {
		return NULL;
	}
	extent_esn_set(extent, esn);
	return extent;
}

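/* Copy out base's allocated/resident/mapped statistics. */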
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
    size_t *mapped) {
	cassert(config_stats);

	malloc_mutex_lock(tsdn, &base->mtx);
	assert(base->allocated <= base->resident);
	assert(base->resident <= base->mapped);
	*allocated = base->allocated;
	*resident = base->resident;
	*mapped = base->mapped;
	malloc_mutex_unlock(tsdn, &base->mtx);
}

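/* Fork synchronization for the base mutex. */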
void
base_prefork(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_prefork(tsdn, &base->mtx);
}

void
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_parent(tsdn, &base->mtx);
}

void
base_postfork_child(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_child(tsdn, &base->mtx);
}

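/* Create the global base allocator (b0); returns true on failure. */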
bool
base_boot(tsdn_t *tsdn) {
	b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
	return (b0 == NULL);
}