#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
#define JEMALLOC_INTERNAL_PROF_INLINES_B_H

#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sz.h"

JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void) {
	/*
	 * No locking is used when reading prof_gdump_val in the fast path, so
	 * there are no guarantees regarding how long it will take for all
	 * threads to notice state changes.
	 */
	return prof_gdump_val;
}

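/*
 * Return the calling thread's profiling data (prof_tdata_t), lazily creating
 * or reinitializing it when create is true and the thread state allows it.
 */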
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	tdata = tsd_prof_tdata_get(tsd);
	if (create) {
		if (unlikely(tdata == NULL)) {
			if (tsd_nominal(tsd)) {
				tdata = prof_tdata_init(tsd);
				tsd_prof_tdata_set(tsd, tdata);
			}
		} else if (unlikely(tdata->expired)) {
			tdata = prof_tdata_reinit(tsd, tdata);
			tsd_prof_tdata_set(tsd, tdata);
		}
		assert(tdata == NULL || tdata->attached);
	}

	return tdata;
}

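/* Read the prof_tctx_t associated with an allocation via the arena layer. */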
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
}

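/* Associate tctx with an allocation of usable size usize. */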
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
}

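/*
 * Reset an existing allocation's tctx; cheaper than prof_tctx_set() when the
 * allocation is known not to have moved.
 */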
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_reset(tsdn, ptr, tctx);
}

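/* Read the allocation timestamp recorded for ptr (via the arena layer). */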
JEMALLOC_ALWAYS_INLINE nstime_t
prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	return arena_prof_alloc_time_get(tsdn, ptr, alloc_ctx);
}

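/* Record the allocation timestamp t for ptr (via the arena layer). */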
JEMALLOC_ALWAYS_INLINE void
prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
    nstime_t t) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_alloc_time_set(tsdn, ptr, alloc_ctx, t);
}

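/*
 * Return true if no sample should be taken, i.e. the per-thread
 * bytes_until_sample counter has not been exhausted by this allocation.  When
 * update is true, the counter is debited by usize as a side effect.
 */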
JEMALLOC_ALWAYS_INLINE bool
prof_sample_check(tsd_t *tsd, size_t usize, bool update) {
	ssize_t check = update ? 0 : usize;

	int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
	if (update) {
		bytes_until_sample -= usize;
		if (tsd_nominal(tsd)) {
			tsd_bytes_until_sample_set(tsd, bytes_until_sample);
		}
	}
	if (likely(bytes_until_sample >= check)) {
		return true;
	}

	return false;
}

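/*
 * Update per-thread sampling state for an allocation of usize bytes and
 * return true if the allocation should not be sampled.  When update is true,
 * the byte counter is debited and a new sample threshold may be computed.
 */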
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
			 prof_tdata_t **tdata_out) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	/* Fastpath: no need to load tdata */
	if (likely(prof_sample_check(tsd, usize, update))) {
		return true;
	}

	bool booted = tsd_prof_tdata_get(tsd);
	tdata = prof_tdata_get(tsd, true);
	if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
		tdata = NULL;
	}

	if (tdata_out != NULL) {
		*tdata_out = tdata;
	}

	if (unlikely(tdata == NULL)) {
		return true;
	}

	/*
	 * If this was the first creation of tdata, then
	 * prof_tdata_get() reset bytes_until_sample, so decrement and
	 * check it again.
	 */
	if (!booted && prof_sample_check(tsd, usize, update)) {
		return true;
	}

	if (tsd_reentrancy_level_get(tsd) > 0) {
		return true;
	}
	/* Compute new sample threshold. */
	if (update) {
		prof_sample_threshold_update(tdata);
	}
	return !tdata->active;
}

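/*
 * Prepare to allocate usize bytes under profiling: return (prof_tctx_t *)1U
 * if the allocation will not be sampled, otherwise capture a backtrace and
 * return the corresponding tctx from prof_lookup().
 */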
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
	prof_tctx_t *ret;
	prof_tdata_t *tdata;
	prof_bt_t bt;

	assert(usize == sz_s2u(usize));

	if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
	    &tdata))) {
		ret = (prof_tctx_t *)(uintptr_t)1U;
	} else {
		bt_init(&bt, tdata->vec);
		prof_backtrace(&bt);
		ret = prof_lookup(tsd, &bt);
	}

	return ret;
}

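/*
 * Record a completed allocation: sampled allocations are registered via
 * prof_malloc_sample_object(), all others are tagged with the non-sample
 * sentinel (prof_tctx_t *)1U.
 */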
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
    prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(usize == isalloc(tsdn, ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_malloc_sample_object(tsdn, ptr, usize, tctx);
	} else {
		prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
		    (prof_tctx_t *)(uintptr_t)1U);
	}
}

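/*
 * Profiling bookkeeping for reallocation: possibly roll back an unwarranted
 * sampling decision, record the new allocation, and finally release the old
 * sampled object (see the ordering comment below).
 */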
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
    prof_tctx_t *old_tctx) {
	bool sampled, old_sampled, moved;

	cassert(config_prof);
	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

	if (prof_active && !updated && ptr != NULL) {
		assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		if (prof_sample_accum_update(tsd, usize, true, NULL)) {
			/*
			 * Don't sample.  The usize passed to prof_alloc_prep()
			 * was larger than what actually got allocated, so a
			 * backtrace was captured for this allocation, even
			 * though its actual usize was insufficient to cross the
			 * sample threshold.
			 */
			prof_alloc_rollback(tsd, tctx, true);
			tctx = (prof_tctx_t *)(uintptr_t)1U;
		}
	}

	sampled = ((uintptr_t)tctx > (uintptr_t)1U);
	old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
	moved = (ptr != old_ptr);

	if (unlikely(sampled)) {
		prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
	} else if (moved) {
		prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
		    (prof_tctx_t *)(uintptr_t)1U);
	} else if (unlikely(old_sampled)) {
		/*
		 * prof_tctx_set() would work for the !moved case as well, but
		 * prof_tctx_reset() is slightly cheaper, and the proper thing
		 * to do here in the presence of explicit knowledge re: moved
		 * state.
		 */
		prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
	} else {
		assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
		    (uintptr_t)1U);
	}

	/*
	 * The prof_free_sampled_object() call must come after the
	 * prof_malloc_sample_object() call, because tctx and old_tctx may be
	 * the same, in which case reversing the call order could cause the tctx
	 * to be prematurely destroyed as a side effect of momentarily zeroed
	 * counters.
	 */
	if (unlikely(old_sampled)) {
		prof_free_sampled_object(tsd, ptr, old_usize, old_tctx);
	}
}

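/* If the allocation being freed was sampled, report it to the profiler. */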
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
	prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);

	cassert(config_prof);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_free_sampled_object(tsd, ptr, usize, tctx);
	}
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */