xref: /freebsd/contrib/jemalloc/include/jemalloc/internal/ehooks.h (revision c43cad87172039ccf38172129c79755ea79e6102)
1 #ifndef JEMALLOC_INTERNAL_EHOOKS_H
2 #define JEMALLOC_INTERNAL_EHOOKS_H
3 
4 #include "jemalloc/internal/atomic.h"
5 #include "jemalloc/internal/extent_mmap.h"
6 
7 /*
8  * This module is the internal interface to the extent hooks (both
9  * user-specified and external).  Eventually, this will give us the flexibility
10  * to use multiple different versions of user-visible extent-hook APIs under a
11  * single user interface.
12  *
13  * Current API expansions (not available to anyone but the default hooks yet):
14  *   - Head state tracking.  Hooks can decide whether or not to merge two
15  *     extents based on whether or not one of them is the head (i.e. was
16  *     allocated on its own).  The later extent loses its "head" status.
17  */
18 
19 extern const extent_hooks_t ehooks_default_extent_hooks;
20 
21 typedef struct ehooks_s ehooks_t;
22 struct ehooks_s {
23 	/*
24 	 * The user-visible id that goes with the ehooks (i.e. that of the base
25 	 * they're a part of, the associated arena's index within the arenas
26 	 * array).
27 	 */
28 	unsigned ind;
29 	/* Logically an extent_hooks_t *. */
30 	atomic_p_t ptr;
31 };
32 
33 extern const extent_hooks_t ehooks_default_extent_hooks;
34 
35 /*
36  * These are not really part of the public API.  Each hook has a fast-path for
37  * the default-hooks case that can avoid various small inefficiencies:
38  *   - Forgetting tsd and then calling tsd_get within the hook.
39  *   - Getting more state than necessary out of the extent_t.
40  *   - Doing arena_ind -> arena -> arena_ind lookups.
41  * By making the calls to these functions visible to the compiler, it can move
42  * those extra bits of computation down below the fast-paths where they get ignored.
43  */
44 void *ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
45     size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
46 bool ehooks_default_dalloc_impl(void *addr, size_t size);
47 void ehooks_default_destroy_impl(void *addr, size_t size);
48 bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length);
49 bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length);
50 #ifdef PAGES_CAN_PURGE_LAZY
51 bool ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length);
52 #endif
53 #ifdef PAGES_CAN_PURGE_FORCED
54 bool ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length);
55 #endif
56 bool ehooks_default_split_impl();
57 /*
58  * Merge is the only default extent hook we declare -- see the comment in
59  * ehooks_merge.
60  */
61 bool ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a,
62     size_t size_a, void *addr_b, size_t size_b, bool committed,
63     unsigned arena_ind);
64 bool ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b);
65 void ehooks_default_zero_impl(void *addr, size_t size);
66 void ehooks_default_guard_impl(void *guard1, void *guard2);
67 void ehooks_default_unguard_impl(void *guard1, void *guard2);
68 
69 /*
 * We don't officially support reentrancy from within the extent hooks.  But
71  * various people who sit within throwing distance of the jemalloc team want
72  * that functionality in certain limited cases.  The default reentrancy guards
73  * assert that we're not reentrant from a0 (since it's the bootstrap arena,
74  * where reentrant allocations would be redirected), which we would incorrectly
75  * trigger in cases where a0 has extent hooks (those hooks themselves can't be
76  * reentrant, then, but there are reasonable uses for such functionality, like
77  * putting internal metadata on hugepages).  Therefore, we use the raw
78  * reentrancy guards.
79  *
80  * Eventually, we need to think more carefully about whether and where we
81  * support allocating from within extent hooks (and what that means for things
82  * like profiling, stats collection, etc.), and document what the guarantee is.
83  */
84 static inline void
85 ehooks_pre_reentrancy(tsdn_t *tsdn) {
86 	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
87 	tsd_pre_reentrancy_raw(tsd);
88 }
89 
90 static inline void
91 ehooks_post_reentrancy(tsdn_t *tsdn) {
92 	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
93 	tsd_post_reentrancy_raw(tsd);
94 }
95 
96 /* Beginning of the public API. */
97 void ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind);
98 
99 static inline unsigned
100 ehooks_ind_get(const ehooks_t *ehooks) {
101 	return ehooks->ind;
102 }
103 
/*
 * Publish a new extent_hooks_t for these ehooks.  The release store pairs
 * with the acquire load in ehooks_get_extent_hooks_ptr.
 */
static inline void
ehooks_set_extent_hooks_ptr(ehooks_t *ehooks, extent_hooks_t *extent_hooks) {
	atomic_store_p(&ehooks->ptr, extent_hooks, ATOMIC_RELEASE);
}
108 
/*
 * Load the current extent_hooks_t.  The acquire load pairs with the release
 * store in ehooks_set_extent_hooks_ptr.
 */
static inline extent_hooks_t *
ehooks_get_extent_hooks_ptr(ehooks_t *ehooks) {
	return (extent_hooks_t *)atomic_load_p(&ehooks->ptr, ATOMIC_ACQUIRE);
}
113 
114 static inline bool
115 ehooks_are_default(ehooks_t *ehooks) {
116 	return ehooks_get_extent_hooks_ptr(ehooks) ==
117 	    &ehooks_default_extent_hooks;
118 }
119 
120 /*
121  * In some cases, a caller needs to allocate resources before attempting to call
122  * a hook.  If that hook is doomed to fail, this is wasteful.  We therefore
123  * include some checks for such cases.
124  */
125 static inline bool
126 ehooks_dalloc_will_fail(ehooks_t *ehooks) {
127 	if (ehooks_are_default(ehooks)) {
128 		return opt_retain;
129 	} else {
130 		return ehooks_get_extent_hooks_ptr(ehooks)->dalloc == NULL;
131 	}
132 }
133 
134 static inline bool
135 ehooks_split_will_fail(ehooks_t *ehooks) {
136 	return ehooks_get_extent_hooks_ptr(ehooks)->split == NULL;
137 }
138 
139 static inline bool
140 ehooks_merge_will_fail(ehooks_t *ehooks) {
141 	return ehooks_get_extent_hooks_ptr(ehooks)->merge == NULL;
142 }
143 
144 static inline bool
145 ehooks_guard_will_fail(ehooks_t *ehooks) {
146 	/*
147 	 * Before the guard hooks are officially introduced, limit the use to
148 	 * the default hooks only.
149 	 */
150 	return !ehooks_are_default(ehooks);
151 }
152 
153 /*
154  * Some hooks are required to return zeroed memory in certain situations.  In
155  * debug mode, we do some heuristic checks that they did what they were supposed
156  * to.
157  *
158  * This isn't really ehooks-specific (i.e. anyone can check for zeroed memory).
159  * But incorrect zero information indicates an ehook bug.
160  */
static inline void
ehooks_debug_zero_check(void *addr, size_t size) {
	/* The range must be page-aligned and non-empty. */
	assert(((uintptr_t)addr & PAGE_MASK) == 0);
	assert((size & PAGE_MASK) == 0);
	assert(size > 0);
	if (config_debug) {
		/* Check the whole first page. */
		size_t *p = (size_t *)addr;
		for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
			assert(p[i] == 0);
		}
		/*
		 * And 4 spots within.  There's a tradeoff here; the larger
		 * this number, the more likely it is that we'll catch a bug
		 * where ehooks return a sparsely non-zero range.  But
		 * increasing the number of checks also increases the number of
		 * page faults in debug mode.  FreeBSD does much of their
		 * day-to-day development work in debug mode, so we don't want
		 * even the debug builds to be too slow.
		 */
		const size_t nchecks = 4;
		assert(PAGE >= sizeof(size_t) * nchecks);
		for (size_t i = 0; i < nchecks; ++i) {
			/* Sample one word from each quarter of the range. */
			assert(p[i * (size / sizeof(size_t) / nchecks)] == 0);
		}
	}
}
188 
189 
/*
 * Allocate an extent via the installed hooks, taking the fast path for the
 * default hooks.  On success, debug builds heuristically verify any claimed
 * zeroing.
 */
static inline void *
ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit) {
	/* Remember the request so we can assert hooks never clear *zero. */
	bool orig_zero = *zero;
	void *ret;
	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
	if (extent_hooks == &ehooks_default_extent_hooks) {
		ret = ehooks_default_alloc_impl(tsdn, new_addr, size,
		    alignment, zero, commit, ehooks_ind_get(ehooks));
	} else {
		ehooks_pre_reentrancy(tsdn);
		ret = extent_hooks->alloc(extent_hooks, new_addr, size,
		    alignment, zero, commit, ehooks_ind_get(ehooks));
		ehooks_post_reentrancy(tsdn);
	}
	/* A non-NULL new_addr is a fixed-address request: honor it or fail. */
	assert(new_addr == NULL || ret == NULL || new_addr == ret);
	/* If the caller required zeroed memory, it must still be flagged. */
	assert(!orig_zero || *zero);
	if (*zero && ret != NULL) {
		ehooks_debug_zero_check(ret, size);
	}
	return ret;
}
212 
213 static inline bool
214 ehooks_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
215     bool committed) {
216 	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
217 	if (extent_hooks == &ehooks_default_extent_hooks) {
218 		return ehooks_default_dalloc_impl(addr, size);
219 	} else if (extent_hooks->dalloc == NULL) {
220 		return true;
221 	} else {
222 		ehooks_pre_reentrancy(tsdn);
223 		bool err = extent_hooks->dalloc(extent_hooks, addr, size,
224 		    committed, ehooks_ind_get(ehooks));
225 		ehooks_post_reentrancy(tsdn);
226 		return err;
227 	}
228 }
229 
230 static inline void
231 ehooks_destroy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
232     bool committed) {
233 	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
234 	if (extent_hooks == &ehooks_default_extent_hooks) {
235 		ehooks_default_destroy_impl(addr, size);
236 	} else if (extent_hooks->destroy == NULL) {
237 		/* Do nothing. */
238 	} else {
239 		ehooks_pre_reentrancy(tsdn);
240 		extent_hooks->destroy(extent_hooks, addr, size, committed,
241 		    ehooks_ind_get(ehooks));
242 		ehooks_post_reentrancy(tsdn);
243 	}
244 }
245 
246 static inline bool
247 ehooks_commit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
248     size_t offset, size_t length) {
249 	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
250 	bool err;
251 	if (extent_hooks == &ehooks_default_extent_hooks) {
252 		err = ehooks_default_commit_impl(addr, offset, length);
253 	} else if (extent_hooks->commit == NULL) {
254 		err = true;
255 	} else {
256 		ehooks_pre_reentrancy(tsdn);
257 		err = extent_hooks->commit(extent_hooks, addr, size,
258 		    offset, length, ehooks_ind_get(ehooks));
259 		ehooks_post_reentrancy(tsdn);
260 	}
261 	if (!err) {
262 		ehooks_debug_zero_check(addr, size);
263 	}
264 	return err;
265 }
266 
267 static inline bool
268 ehooks_decommit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
269     size_t offset, size_t length) {
270 	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
271 	if (extent_hooks == &ehooks_default_extent_hooks) {
272 		return ehooks_default_decommit_impl(addr, offset, length);
273 	} else if (extent_hooks->decommit == NULL) {
274 		return true;
275 	} else {
276 		ehooks_pre_reentrancy(tsdn);
277 		bool err = extent_hooks->decommit(extent_hooks, addr, size,
278 		    offset, length, ehooks_ind_get(ehooks));
279 		ehooks_post_reentrancy(tsdn);
280 		return err;
281 	}
282 }
283 
/* Lazily purge [offset, offset + length); returns true on failure. */
static inline bool
ehooks_purge_lazy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
    size_t offset, size_t length) {
	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
#ifdef PAGES_CAN_PURGE_LAZY
	/* The fast path only exists where the OS supports lazy purging. */
	if (extent_hooks == &ehooks_default_extent_hooks) {
		return ehooks_default_purge_lazy_impl(addr, offset, length);
	}
#endif
	/*
	 * NOTE(review): when PAGES_CAN_PURGE_LAZY is undefined, the default
	 * hooks fall through to this member check -- presumably their
	 * purge_lazy member is NULL in that configuration (defined in
	 * ehooks.c); confirm there.
	 */
	if (extent_hooks->purge_lazy == NULL) {
		return true;
	} else {
		ehooks_pre_reentrancy(tsdn);
		bool err = extent_hooks->purge_lazy(extent_hooks, addr, size,
		    offset, length, ehooks_ind_get(ehooks));
		ehooks_post_reentrancy(tsdn);
		return err;
	}
}
303 
/* Forcibly purge (zero) [offset, offset + length); returns true on failure. */
static inline bool
ehooks_purge_forced(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
    size_t offset, size_t length) {
	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
	/*
	 * It would be correct to have a ehooks_debug_zero_check call at the end
	 * of this function; purge_forced is required to zero.  But checking
	 * would touch the page in question, which may have performance
	 * consequences (imagine the hooks are using hugepages, with a global
	 * zero page off).  Even in debug mode, it's usually a good idea to
	 * avoid cases that can dramatically increase memory consumption.
	 */
#ifdef PAGES_CAN_PURGE_FORCED
	/* The fast path only exists where the OS supports forced purging. */
	if (extent_hooks == &ehooks_default_extent_hooks) {
		return ehooks_default_purge_forced_impl(addr, offset, length);
	}
#endif
	/*
	 * NOTE(review): mirrors ehooks_purge_lazy -- without the OS
	 * capability, the default hooks are expected to reach this NULL
	 * member check; confirm against ehooks.c.
	 */
	if (extent_hooks->purge_forced == NULL) {
		return true;
	} else {
		ehooks_pre_reentrancy(tsdn);
		bool err = extent_hooks->purge_forced(extent_hooks, addr, size,
		    offset, length, ehooks_ind_get(ehooks));
		ehooks_post_reentrancy(tsdn);
		return err;
	}
}
331 
332 static inline bool
333 ehooks_split(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
334     size_t size_a, size_t size_b, bool committed) {
335 	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
336 	if (ehooks_are_default(ehooks)) {
337 		return ehooks_default_split_impl();
338 	} else if (extent_hooks->split == NULL) {
339 		return true;
340 	} else {
341 		ehooks_pre_reentrancy(tsdn);
342 		bool err = extent_hooks->split(extent_hooks, addr, size, size_a,
343 		    size_b, committed, ehooks_ind_get(ehooks));
344 		ehooks_post_reentrancy(tsdn);
345 		return err;
346 	}
347 }
348 
/* Merge two adjacent extents; returns true on failure. */
static inline bool
ehooks_merge(tsdn_t *tsdn, ehooks_t *ehooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed) {
	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
	if (extent_hooks == &ehooks_default_extent_hooks) {
		/*
		 * Default fast path: call the tsdn-aware impl directly, which
		 * needs only the two addresses (the extent_hooks_t merge
		 * signature has no tsdn parameter) -- hence the separate
		 * ehooks_default_merge / ehooks_default_merge_impl
		 * declarations above.
		 */
		return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
	} else if (extent_hooks->merge == NULL) {
		/* No merge hook: merging is unsupported. */
		return true;
	} else {
		ehooks_pre_reentrancy(tsdn);
		bool err = extent_hooks->merge(extent_hooks, addr_a, size_a,
		    addr_b, size_b, committed, ehooks_ind_get(ehooks));
		ehooks_post_reentrancy(tsdn);
		return err;
	}
}
365 
366 static inline void
367 ehooks_zero(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size) {
368 	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
369 	if (extent_hooks == &ehooks_default_extent_hooks) {
370 		ehooks_default_zero_impl(addr, size);
371 	} else {
372 		/*
373 		 * It would be correct to try using the user-provided purge
374 		 * hooks (since they are required to have zeroed the extent if
375 		 * they indicate success), but we don't necessarily know their
376 		 * cost.  We'll be conservative and use memset.
377 		 */
378 		memset(addr, 0, size);
379 	}
380 }
381 
382 static inline bool
383 ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
384 	bool err;
385 	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
386 
387 	if (extent_hooks == &ehooks_default_extent_hooks) {
388 		ehooks_default_guard_impl(guard1, guard2);
389 		err = false;
390 	} else {
391 		err = true;
392 	}
393 
394 	return err;
395 }
396 
397 static inline bool
398 ehooks_unguard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
399 	bool err;
400 	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
401 
402 	if (extent_hooks == &ehooks_default_extent_hooks) {
403 		ehooks_default_unguard_impl(guard1, guard2);
404 		err = false;
405 	} else {
406 		err = true;
407 	}
408 
409 	return err;
410 }
411 
412 #endif /* JEMALLOC_INTERNAL_EHOOKS_H */
413