#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
#define JEMALLOC_INTERNAL_PROF_STRUCTS_H

#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/rb.h"

struct prof_bt_s {
	/* Backtrace, stored as len program counters. */
	void		**vec;
	unsigned	len;
};
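
/*
 * Illustrative sketch (not part of jemalloc): consuming a prof_bt_s is just
 * a walk over the first len entries of vec.  The function name is
 * hypothetical and <stdio.h> would be needed for printf().
 *
 *	void
 *	bt_print(const prof_bt_t *bt) {
 *		for (unsigned i = 0; i < bt->len; i++) {
 *			printf("frame %u: %p\n", i, bt->vec[i]);
 *		}
 *	}
 */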

#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
	void		**vec;
	unsigned	*len;
	unsigned	max;
} prof_unwind_data_t;
#endif
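
/*
 * Illustrative sketch (not part of jemalloc): how a callback passed to
 * libgcc's _Unwind_Backtrace() might fill prof_unwind_data_t.  The callback
 * name and the exact stopping convention are assumptions; the _Unwind_*
 * types and functions come from <unwind.h>.
 *
 *	static _Unwind_Reason_Code
 *	unwind_cb(struct _Unwind_Context *ctx, void *arg) {
 *		prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
 *		void *ip = (void *)_Unwind_GetIP(ctx);
 *		if (ip == NULL) {
 *			return _URC_END_OF_STACK;
 *		}
 *		data->vec[*data->len] = ip;
 *		if (++*data->len == data->max) {
 *			return _URC_END_OF_STACK;
 *		}
 *		return _URC_NO_REASON;
 *	}
 *
 * Invoked as _Unwind_Backtrace(unwind_cb, &data).
 */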

struct prof_cnt_s {
	/* Profiling counters. */
	uint64_t	curobjs;
	uint64_t	curobjs_shifted_unbiased;
	uint64_t	curbytes;
	uint64_t	curbytes_unbiased;
	uint64_t	accumobjs;
	uint64_t	accumobjs_shifted_unbiased;
	uint64_t	accumbytes;
	uint64_t	accumbytes_unbiased;
};
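
/*
 * A rough intuition for the unbiased variants (an interpretation, not the
 * exact jemalloc formula): a sampled allocation of a given size is observed
 * with some size-dependent probability p, so weighting each sample by 1/p
 * yields an unbiased estimate of the true object/byte totals.  The
 * *_shifted_unbiased counters presumably keep the object-count estimate
 * left-shifted so that integer arithmetic retains precision.
 */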

typedef enum {
	prof_tctx_state_initializing,
	prof_tctx_state_nominal,
	prof_tctx_state_dumping,
	prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;

struct prof_tctx_s {
	/* Thread data for thread that performed the allocation. */
	prof_tdata_t		*tdata;

	/*
	 * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
	 * defunct during teardown.
	 */
	uint64_t		thr_uid;
	uint64_t		thr_discrim;

	/*
	 * Reference count of how many times this tctx object is referenced in
	 * recent allocation / deallocation records, protected by tdata->lock.
	 */
	uint64_t		recent_count;

	/* Profiling counters, protected by tdata->lock. */
	prof_cnt_t		cnts;

	/* Associated global context. */
	prof_gctx_t		*gctx;

	/*
	 * UID that distinguishes multiple tctx's created by the same thread,
	 * but coexisting in gctx->tctxs.  There are two ways that such
	 * coexistence can occur:
	 * - A dumper thread can cause a tctx to be retained in the purgatory
	 *   state.
	 * - Although a single "producer" thread must create all tctx's which
	 *   share the same thr_uid, multiple "consumers" can each concurrently
	 *   execute portions of prof_tctx_destroy().  prof_tctx_destroy() only
	 *   gets called once each time cnts.cur{objs,bytes} drop to 0, but this
	 *   threshold can be hit again before the first consumer finishes
	 *   executing prof_tctx_destroy().
	 */
	uint64_t		tctx_uid;

	/* Linkage into gctx's tctxs. */
	rb_node(prof_tctx_t)	tctx_link;

	/*
	 * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
	 * sample vs destroy race.
	 */
	bool			prepared;

	/* Current dump-related state, protected by gctx->lock. */
	prof_tctx_state_t	state;

	/*
	 * Copy of cnts snapshotted during early dump phase, protected by
	 * dump_mtx.
	 */
	prof_cnt_t		dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;

struct prof_info_s {
	/* Time when the allocation was made. */
	nstime_t		alloc_time;
	/* Points to the prof_tctx_t corresponding to the allocation. */
	prof_tctx_t		*alloc_tctx;
	/* Allocation request size. */
	size_t			alloc_size;
};

struct prof_gctx_s {
	/* Protects nlimbo, cnt_summed, and tctxs. */
	malloc_mutex_t		*lock;

	/*
	 * Number of threads that currently cause this gctx to be in a state of
	 * limbo due to one of:
	 *   - Initializing this gctx.
	 *   - Initializing per thread counters associated with this gctx.
	 *   - Preparing to destroy this gctx.
	 *   - Dumping a heap profile that includes this gctx.
	 * nlimbo must be 1 (single destroyer) in order to safely destroy the
	 * gctx.
	 */
	unsigned		nlimbo;

	/*
	 * Tree of profile counters, one for each thread that has allocated in
	 * this context.
	 */
	prof_tctx_tree_t	tctxs;

	/* Linkage for tree of contexts to be dumped. */
	rb_node(prof_gctx_t)	dump_link;

	/* Temporary storage for summation during dump. */
	prof_cnt_t		cnt_summed;

	/* Associated backtrace. */
	prof_bt_t		bt;

	/* Backtrace vector, variable size, referred to by bt. */
	void			*vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
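
/*
 * Illustrative sketch (not part of jemalloc): vec[1] above is the old-style
 * flexible-array idiom, so a gctx sized for a backtrace of len frames would
 * be allocated along these lines (the malloc() call stands in for whatever
 * internal allocator is actually used):
 *
 *	size_t size = offsetof(prof_gctx_t, vec) + len * sizeof(void *);
 *	prof_gctx_t *gctx = (prof_gctx_t *)malloc(size);
 *
 * after which gctx->bt.vec can point at gctx->vec.
 */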

struct prof_tdata_s {
	malloc_mutex_t		*lock;

	/* Monotonically increasing unique thread identifier. */
	uint64_t		thr_uid;

	/*
	 * Monotonically increasing discriminator among tdata structures
	 * associated with the same thr_uid.
	 */
	uint64_t		thr_discrim;

	/* Included in heap profile dumps if non-NULL. */
	char			*thread_name;

	bool			attached;
	bool			expired;

	rb_node(prof_tdata_t)	tdata_link;

	/*
	 * Counter used to initialize prof_tctx_t's tctx_uid.  No locking is
	 * necessary when incrementing this field, because only one thread ever
	 * does so.
	 */
	uint64_t		tctx_uid_next;

	/*
	 * Hash of (prof_bt_t *)-->(prof_tctx_t *).  Each thread tracks
	 * backtraces for which it has non-zero allocation/deallocation counters
	 * associated with thread-specific prof_tctx_t objects.  Other threads
	 * may write to prof_tctx_t contents when freeing associated objects.
	 */
	ckh_t			bt2tctx;

	/* State used to avoid dumping while operating on prof internals. */
	bool			enq;
	bool			enq_idump;
	bool			enq_gdump;

	/*
	 * Set to true during an early dump phase for tdata's which are
	 * currently being dumped.  New threads' tdata's have this initialized
	 * to false so that they aren't accidentally included in later dump
	 * phases.
	 */
	bool			dumping;

	/*
	 * True if profiling is active for this tdata's thread
	 * (thread.prof.active mallctl).
	 */
	bool			active;

	/* Temporary storage for summation during dump. */
	prof_cnt_t		cnt_summed;

	/* Backtrace vector, used for calls to prof_backtrace(). */
	void			*vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
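
/*
 * Illustrative sketch (not part of jemalloc): looking up the tctx for a
 * backtrace in a thread's bt2tctx cache via the cuckoo-hash API declared in
 * ckh.h.  This usage is an assumption; ckh_search() follows the jemalloc
 * convention of returning true on failure.
 *
 *	void *btkey, *tctx;
 *	if (!ckh_search(&tdata->bt2tctx, bt, &btkey, &tctx)) {
 *		(hit: use (prof_tctx_t *)tctx)
 *	} else {
 *		(miss: create and insert a new tctx under tdata->lock)
 *	}
 */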

struct prof_recent_s {
	nstime_t alloc_time;
	nstime_t dalloc_time;

	ql_elm(prof_recent_t) link;
	size_t size;
	size_t usize;
	atomic_p_t alloc_edata;	/* NULL means allocation has been freed. */
	prof_tctx_t *alloc_tctx;
	prof_tctx_t *dalloc_tctx;
};
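
/*
 * Illustrative sketch (not part of jemalloc): checking whether a recent
 * allocation record still refers to a live allocation.  The memory-order
 * choice is an assumption; atomic_load_p() comes from the internal atomics
 * layer.
 *
 *	edata_t *edata = (edata_t *)atomic_load_p(&recent->alloc_edata,
 *	    ATOMIC_ACQUIRE);
 *	bool freed = (edata == NULL);
 */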

#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */