/* xref: /freebsd/contrib/jemalloc/src/ehooks.c (revision c43cad87172039ccf38172129c79755ea79e6102) */
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/extent_mmap.h"

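/*
 * Bind an ehooks_t to an extent_hooks_t (either user-supplied hooks or
 * ehooks_default_extent_hooks below) for the arena with index ind.
 */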
void
ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind) {
	/* All other hooks are optional; this one is not. */
	assert(extent_hooks->alloc != NULL);
	ehooks->ind = ind;
	ehooks_set_extent_hooks_ptr(ehooks, extent_hooks);
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
 * advantage of this to avoid demanding zeroed extents, while still using them
 * when they happen to be returned.
 */
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
	void *ret;

	assert(size != 0);
	assert(alignment != 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}
	/* mmap. */
	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
	    != NULL) {
		return ret;
	}
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}

	/* All strategies for allocation failed. */
	return NULL;
}

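/*
 * Default alloc hook body: look up the arena to determine its dss precedence
 * (disabled while the arena is still being created), delegate to
 * extent_alloc_core(), and adjust the transparent huge page state of the new
 * mapping when madvise()-based THP control is available.
 */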
void *
ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	arena_t *arena = arena_get(tsdn, arena_ind, false);
	/* NULL arena indicates arena_create. */
	assert(arena != NULL || alignment == HUGEPAGE);
	dss_prec_t dss = (arena == NULL) ? dss_prec_disabled :
	    (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED);
	void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment,
	    zero, commit, dss);
	if (have_madvise_huge && ret) {
		pages_set_thp_state(ret, size);
	}
	return ret;
}

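/*
 * The raw extent_hooks_t entry point: fetch the current tsdn and round the
 * requested alignment up to a page multiple before calling the impl above.
 */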
static void *
ehooks_default_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	return ehooks_default_alloc_impl(tsdn_fetch(), new_addr, size,
	    ALIGNMENT_CEILING(alignment, PAGE), zero, commit, arena_ind);
}

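/*
 * Extent hook convention: dalloc returns false iff the pages were actually
 * unmapped.  DSS memory can never be unmapped, so it is always retained.
 */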
bool
ehooks_default_dalloc_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		return extent_dalloc_mmap(addr, size);
	}
	return true;
}

static bool
ehooks_default_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	return ehooks_default_dalloc_impl(addr, size);
}

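/*
 * Destroy unconditionally unmaps non-DSS memory; DSS memory is simply left in
 * place, since it cannot be unmapped.
 */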
void
ehooks_default_destroy_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		pages_unmap(addr, size);
	}
}

static void
ehooks_default_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	ehooks_default_destroy_impl(addr, size);
}

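/*
 * Commit and decommit act on the page-aligned range [offset, offset + length)
 * within an extent; per the extent hook convention, false means success.
 */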
bool
ehooks_default_commit_impl(void *addr, size_t offset, size_t length) {
	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

static bool
ehooks_default_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return ehooks_default_commit_impl(addr, offset, length);
}

bool
ehooks_default_decommit_impl(void *addr, size_t offset, size_t length) {
	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

static bool
ehooks_default_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return ehooks_default_decommit_impl(addr, offset, length);
}

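/*
 * Lazy purging (e.g. MADV_FREE) is only compiled in when the platform
 * advertises support; otherwise the corresponding slot in the default hook
 * table below is left NULL.
 */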
#ifdef PAGES_CAN_PURGE_LAZY
bool
ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length) {
	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

static bool
ehooks_default_purge_lazy(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);
	return ehooks_default_purge_lazy_impl(addr, offset, length);
}
#endif

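/* Likewise for forced purging (e.g. MADV_DONTNEED on suitable platforms). */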
#ifdef PAGES_CAN_PURGE_FORCED
bool
ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length) {
	return pages_purge_forced((void *)((uintptr_t)addr +
	    (uintptr_t)offset), length);
}

static bool
ehooks_default_purge_forced(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);
	return ehooks_default_purge_forced_impl(addr, offset, length);
}
#endif

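/*
 * Split/merge hooks return false to permit the operation.  The default split
 * only refuses when virtual memory regions cannot be coalesced and retain is
 * disabled, as explained below.
 */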
bool
ehooks_default_split_impl() {
	if (!maps_coalesce) {
		/*
		 * Without retain, only whole regions can be purged (required by
		 * MEM_RELEASE on Windows) -- therefore disallow splitting.  See
		 * comments in extent_head_no_merge().
		 */
		return !opt_retain;
	}

	return false;
}

static bool
ehooks_default_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	return ehooks_default_split_impl();
}

bool
ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b) {
	assert(addr_a < addr_b);
	/*
	 * For non-DSS cases --
	 * a) W/o maps_coalesce, merge is not always allowed (Windows):
	 *   1) w/o retain, never merge (first branch below).
	 *   2) with retain, only merge extents from the same VirtualAlloc
	 *      region (in which case MEM_DECOMMIT is utilized for purging).
	 *
	 * b) With maps_coalesce, it's always possible to merge.
	 *   1) w/o retain, always allow merge (only about dirty / muzzy).
	 *   2) with retain, to preserve the SN / first-fit, merge is still
	 *      disallowed if b is a head extent, i.e. no merging across
	 *      different mmap regions.
	 *
	 * a2) and b2) are implemented in emap_try_acquire_edata_neighbor, and
	 * sanity checked in the second branch below.
	 */
	if (!maps_coalesce && !opt_retain) {
		return true;
	}
	if (config_debug) {
		edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global,
		    addr_a);
		bool head_a = edata_is_head_get(a);
		edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global,
		    addr_b);
		bool head_b = edata_is_head_get(b);
		emap_assert_mapped(tsdn, &arena_emap_global, a);
		emap_assert_mapped(tsdn, &arena_emap_global, b);
		assert(extent_neighbor_head_state_mergeable(head_a, head_b,
		    /* forward */ true));
	}
	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
		return true;
	}

	return false;
}

bool
ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	tsdn_t *tsdn = tsdn_fetch();

	return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
}

void
ehooks_default_zero_impl(void *addr, size_t size) {
	/*
	 * By default, we try to zero out memory using OS-provided demand-zeroed
	 * pages.  If the user has specifically requested hugepages, though, we
	 * don't want to purge in the middle of a hugepage (which would break it
	 * up), so we act conservatively and use memset.
	 */
	bool needs_memset = true;
	if (opt_thp != thp_mode_always) {
		needs_memset = pages_purge_forced(addr, size);
	}
	if (needs_memset) {
		memset(addr, 0, size);
	}
}

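/*
 * Guard hooks mark / unmark the pair of guard pages that bracket a guarded
 * extent (see pages_mark_guards() / pages_unmark_guards()).
 */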
void
ehooks_default_guard_impl(void *guard1, void *guard2) {
	pages_mark_guards(guard1, guard2);
}

void
ehooks_default_unguard_impl(void *guard1, void *guard2) {
	pages_unmark_guards(guard1, guard2);
}

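/*
 * The hook table installed when no user hooks are supplied.  Slots for purge
 * variants the platform cannot support are NULL, which tells jemalloc the
 * operation is unavailable.
 */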
const extent_hooks_t ehooks_default_extent_hooks = {
	ehooks_default_alloc,
	ehooks_default_dalloc,
	ehooks_default_destroy,
	ehooks_default_commit,
	ehooks_default_decommit,
#ifdef PAGES_CAN_PURGE_LAZY
	ehooks_default_purge_lazy,
#else
	NULL,
#endif
#ifdef PAGES_CAN_PURGE_FORCED
	ehooks_default_purge_forced,
#else
	NULL,
#endif
	ehooks_default_split,
	ehooks_default_merge
};