/* xref: /freebsd/contrib/jemalloc/src/base.c (revision c99b67a7947ea215f9c1d44ec022680e98920cd1) */
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"

/******************************************************************************/
/* Data. */

static base_t	*b0;

/******************************************************************************/

static void *
base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
	void *addr;
	bool zero = true;
	bool commit = true;

	assert(size == HUGEPAGE_CEILING(size));

	if (extent_hooks == &extent_hooks_default) {
		addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
	} else {
		addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
		    &zero, &commit, ind);
	}

	return addr;
}

static void
base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr,
    size_t size) {
	/*
	 * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
	 * stopping at first success.  This cascade is performed for consistency
	 * with the cascade in extent_dalloc_wrapper() because an application's
	 * custom hooks may not support e.g. dalloc.  This function is only ever
	 * called as a side effect of arena destruction, so although it might
	 * seem pointless to do anything besides dalloc here, the application
	 * may in fact want the end state of all associated virtual memory to be
	 * in some consistent-but-allocated state.
	 */
	if (extent_hooks == &extent_hooks_default) {
		if (!extent_dalloc_mmap(addr, size)) {
			return;
		}
		if (!pages_decommit(addr, size)) {
			return;
		}
		if (!pages_purge_forced(addr, size)) {
			return;
		}
		if (!pages_purge_lazy(addr, size)) {
			return;
		}
		/* Nothing worked.  This should never happen. */
		not_reached();
	} else {
		if (extent_hooks->dalloc != NULL &&
		    !extent_hooks->dalloc(extent_hooks, addr, size, true,
		    ind)) {
			return;
		}
		if (extent_hooks->decommit != NULL &&
		    !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
		    ind)) {
			return;
		}
		if (extent_hooks->purge_forced != NULL &&
		    !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
		    size, ind)) {
			return;
		}
		if (extent_hooks->purge_lazy != NULL &&
		    !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
		    ind)) {
			return;
		}
		/* Nothing worked.  That's the application's problem. */
	}
}
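
/*
 * Editor's sketch (not upstream code): the cascade above exists for custom
 * extent hooks that cannot unmap, e.g. hooks that carve base blocks out of a
 * preallocated pool.  Hypothetical hooks for such a pool would decline dalloc
 * (return true, the failure convention documented in jemalloc(3)) and let the
 * purge steps discard physical pages instead:
 *
 *	static bool
 *	pool_dalloc(extent_hooks_t *hooks, void *addr, size_t size,
 *	    bool committed, unsigned arena_ind) {
 *		return true;	(failure: keep the mapping alive)
 *	}
 *
 *	static bool
 *	pool_purge_lazy(extent_hooks_t *hooks, void *addr, size_t size,
 *	    size_t offset, size_t length, unsigned arena_ind) {
 *		return madvise((char *)addr + offset, length, MADV_FREE) != 0;
 *	}
 *
 * With dalloc and decommit declining (or left NULL), base_unmap() falls
 * through to purge_forced/purge_lazy, leaving the pool mapped but cheap to
 * keep around, which is the "consistent-but-allocated state" described above.
 */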

static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
    size_t size) {
	size_t sn;

	sn = *extent_sn_next;
	(*extent_sn_next)++;

	extent_binit(extent, addr, size, sn);
}

static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
    size_t alignment) {
	void *ret;

	assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
	assert(size == ALIGNMENT_CEILING(size, alignment));

	*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
	    alignment) - (uintptr_t)extent_addr_get(extent);
	ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
	assert(extent_bsize_get(extent) >= *gap_size + size);
	extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
	    *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
	    extent_sn_get(extent));
	return ret;
}
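
/*
 * Worked example (editor's addition): suppose the extent currently covers
 * [0x1010, 0x2000), i.e. extent_addr_get() == 0x1010 and extent_bsize_get()
 * == 0xff0, and the caller asks for size == 0x100 at alignment == 0x40.
 * Then:
 *
 *	*gap_size = ALIGNMENT_CEILING(0x1010, 0x40) - 0x1010
 *	          = 0x1040 - 0x1010 = 0x30
 *	ret       = (void *)0x1040
 *
 * and the extent is re-initialized to the remainder [0x1140, 0x2000), i.e.
 * bsize shrinks by *gap_size + size == 0x130.  The gap bytes are simply
 * skipped; base memory is never freed individually, so nothing tracks them
 * afterwards.
 */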

static void
base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
    size_t gap_size, void *addr, size_t size) {
	if (extent_bsize_get(extent) > 0) {
		/*
		 * Compute the index for the largest size class that does not
		 * exceed extent's size.
		 */
		szind_t index_floor =
		    sz_size2index(extent_bsize_get(extent) + 1) - 1;
		extent_heap_insert(&base->avail[index_floor], extent);
	}

	if (config_stats) {
		base->allocated += size;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation.
		 */
		base->resident += PAGE_CEILING((uintptr_t)addr + size) -
		    PAGE_CEILING((uintptr_t)addr - gap_size);
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
	}
}
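
/*
 * Worked example (editor's addition), assuming the default size classes
 * (..., 80, 96, 112, ...): if the trimmed extent has bsize == 100,
 * sz_size2index(101) is the index of 112 (the smallest class >= 101), and
 * subtracting one yields the index of 96, the largest class not exceeding
 * 100.  Inserting at that floor index keeps the avail heaps conservative:
 * the search in base_alloc_impl() starts at sz_size2index(asize), and any
 * extent it finds at or above that index is guaranteed to be large enough.
 */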

static void *
base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
    size_t size, size_t alignment) {
	void *ret;
	size_t gap_size;

	ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
	base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size);
	return ret;
}

/*
 * Allocate a block of virtual memory that is large enough to start with a
 * base_block_t header, followed by an object of specified size and alignment.
 * On success a pointer to the initialized base_block_t header is returned.
 */
static base_block_t *
base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
    pszind_t *pind_last, size_t *extent_sn_next, size_t size,
    size_t alignment) {
	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t header_size = sizeof(base_block_t);
	size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
	    header_size;
	/*
	 * Create increasingly larger blocks in order to limit the total number
	 * of disjoint virtual memory ranges.  Choose the next size in the page
	 * size class series (skipping size classes that are not a multiple of
	 * HUGEPAGE), or a size large enough to satisfy the requested size and
	 * alignment, whichever is larger.
	 */
	size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
	    + usize));
	pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
	    *pind_last;
	size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
	size_t block_size = (min_block_size > next_block_size) ? min_block_size
	    : next_block_size;
	base_block_t *block = (base_block_t *)base_map(extent_hooks, ind,
	    block_size);
	if (block == NULL) {
		return NULL;
	}
	*pind_last = sz_psz2ind(block_size);
	block->size = block_size;
	block->next = NULL;
	assert(block_size >= header_size);
	base_extent_init(extent_sn_next, &block->extent,
	    (void *)((uintptr_t)block + header_size), block_size - header_size);
	return block;
}
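
/*
 * Sizing sketch (editor's addition), assuming 4 KiB pages and 2 MiB huge
 * pages: for a small request (say usize == 64 at QUANTUM alignment),
 * min_block_size already rounds up to one huge page, so the first block a
 * base allocates is 2 MiB.  Later blocks take the page size class after
 * *pind_last and round it up to a huge-page multiple, so block sizes grow
 * geometrically and the number of disjoint mappings grows only slowly with
 * total base usage.  An oversized request makes min_block_size win the
 * comparison and yields a correspondingly larger block.
 */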

/*
 * Allocate an extent that is at least as large as specified size, with
 * specified alignment.
 */
static extent_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &base->mtx);

	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	/*
	 * Drop mutex during base_block_alloc(), because an extent hook will be
	 * called.
	 */
	malloc_mutex_unlock(tsdn, &base->mtx);
	base_block_t *block = base_block_alloc(extent_hooks, base_ind_get(base),
	    &base->pind_last, &base->extent_sn_next, size, alignment);
	malloc_mutex_lock(tsdn, &base->mtx);
	if (block == NULL) {
		return NULL;
	}
	block->next = base->blocks;
	base->blocks = block;
	if (config_stats) {
		base->allocated += sizeof(base_block_t);
		base->resident += PAGE_CEILING(sizeof(base_block_t));
		base->mapped += block->size;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
	}
	return &block->extent;
}

base_t *
b0get(void) {
	return b0;
}

base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	pszind_t pind_last = 0;
	size_t extent_sn_next = 0;
	base_block_t *block = base_block_alloc(extent_hooks, ind, &pind_last,
	    &extent_sn_next, sizeof(base_t), QUANTUM);
	if (block == NULL) {
		return NULL;
	}

	size_t gap_size;
	size_t base_alignment = CACHELINE;
	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
	    &gap_size, base_size, base_alignment);
	base->ind = ind;
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
	    malloc_mutex_rank_exclusive)) {
		base_unmap(extent_hooks, ind, block, block->size);
		return NULL;
	}
	base->pind_last = pind_last;
	base->extent_sn_next = extent_sn_next;
	base->blocks = block;
	for (szind_t i = 0; i < NSIZES; i++) {
		extent_heap_new(&base->avail[i]);
	}
	if (config_stats) {
		base->allocated = sizeof(base_block_t);
		base->resident = PAGE_CEILING(sizeof(base_block_t));
		base->mapped = block->size;
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
	}
	base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base,
	    base_size);

	return base;
}
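
/*
 * Usage sketch (editor's addition): roughly how arena creation consumes this
 * API, with error handling abbreviated and the default hooks shown (a caller
 * may pass application-supplied hooks instead):
 *
 *	base_t *base = base_new(tsdn, ind,
 *	    (extent_hooks_t *)&extent_hooks_default);
 *	if (base == NULL) {
 *		return NULL;
 *	}
 *	arena_t *arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t),
 *	    CACHELINE);
 *	...
 *	base_delete(base);
 *
 * base_delete() unmaps (or purges) every block via base_unmap() in one pass;
 * individual base allocations are never freed, so deletion must wait until
 * nothing references memory served by the base.
 */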

void
base_delete(base_t *base) {
	extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
	base_block_t *next = base->blocks;
	do {
		base_block_t *block = next;
		next = block->next;
		base_unmap(extent_hooks, base_ind_get(base), block,
		    block->size);
	} while (next != NULL);
}

extent_hooks_t *
base_extent_hooks_get(base_t *base) {
	return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
	    ATOMIC_ACQUIRE);
}

extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
	extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
	return old_extent_hooks;
}
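
/*
 * Editor's note: the ATOMIC_RELEASE store above pairs with the ATOMIC_ACQUIRE
 * load in base_extent_hooks_get().  After a caller installs new hooks, e.g.
 *
 *	extent_hooks_t *old = base_extent_hooks_set(base, &my_hooks);
 *
 * (my_hooks being a hypothetical fully initialized table), a thread that
 * later loads the pointer via base_extent_hooks_get() also observes the
 * initialized contents of my_hooks.  The pair only orders memory; it does
 * not serialize against hook invocations already in flight.
 */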

static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
    size_t *esn) {
	alignment = QUANTUM_CEILING(alignment);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
	size_t asize = usize + alignment - QUANTUM;

	extent_t *extent = NULL;
	malloc_mutex_lock(tsdn, &base->mtx);
	for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
		extent = extent_heap_remove_first(&base->avail[i]);
		if (extent != NULL) {
			/* Use existing space. */
			break;
		}
	}
	if (extent == NULL) {
		/* Try to allocate more space. */
		extent = base_extent_alloc(tsdn, base, usize, alignment);
	}
	void *ret;
	if (extent == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
	if (esn != NULL) {
		*esn = extent_sn_get(extent);
	}
label_return:
	malloc_mutex_unlock(tsdn, &base->mtx);
	return ret;
}
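
/*
 * Worked example (editor's addition): every extent that reaches the avail
 * heaps starts at a QUANTUM-aligned address, so the alignment gap taken by
 * base_extent_bump_alloc_helper() is at most alignment - QUANTUM bytes.
 * With QUANTUM == 16, a request for usize == 256 at alignment == 64 gives
 * asize == 256 + 64 - 16 == 304; any heap extent of at least 304 bytes can
 * satisfy the request no matter how its start address is aligned beyond
 * QUANTUM, which is why the search above may simply take the first non-empty
 * heap at or above sz_size2index(asize).
 */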

/*
 * base_alloc() returns zeroed memory, which is always demand-zeroed for the
 * auto arenas, in order to make multi-page sparse data structures such as radix
 * tree nodes efficient with respect to physical memory usage.  Upon success a
 * pointer to at least size bytes with specified alignment is returned.  Note
 * that size is rounded up to the nearest multiple of alignment to avoid false
 * sharing.
 */
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
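
/*
 * Editor's note: with a 64-byte CACHELINE, base_alloc(tsdn, base, 40,
 * CACHELINE) rounds the request up to 64 bytes, so two such allocations can
 * never share a cache line.  The returned memory is zeroed because base_map()
 * requests zeroed, committed pages; there is no explicit memset on this path.
 */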

extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
	size_t esn;
	extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
	    CACHELINE, &esn);
	if (extent == NULL) {
		return NULL;
	}
	extent_esn_set(extent, esn);
	return extent;
}

void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
    size_t *mapped) {
	cassert(config_stats);

	malloc_mutex_lock(tsdn, &base->mtx);
	assert(base->allocated <= base->resident);
	assert(base->resident <= base->mapped);
	*allocated = base->allocated;
	*resident = base->resident;
	*mapped = base->mapped;
	malloc_mutex_unlock(tsdn, &base->mtx);
}

void
base_prefork(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_prefork(tsdn, &base->mtx);
}

void
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_parent(tsdn, &base->mtx);
}

void
base_postfork_child(tsdn_t *tsdn, base_t *base) {
	malloc_mutex_postfork_child(tsdn, &base->mtx);
}

bool
base_boot(tsdn_t *tsdn) {
	b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
	return (b0 == NULL);
}
393