#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
#define JEMALLOC_INTERNAL_PROF_STRUCTS_H

#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/rb.h"

struct prof_bt_s {
	/* Backtrace, stored as len program counters. */
	void		**vec;
	unsigned	len;
};
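/*
 * Illustrative only (not part of this header): prof_bt_t does not own its
 * storage; vec is typically pointed at a caller-provided array of return
 * addresses and len records how many entries the unwinder filled in.  A
 * minimal sketch, assuming a caller-supplied buffer:
 *
 *	void *frames[PROF_BT_MAX];
 *	prof_bt_t bt;
 *	bt.vec = frames;
 *	bt.len = 0;	// Filled in by the unwinder, e.g. prof_backtrace().
 */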

#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
	prof_bt_t	*bt;
	unsigned	max;
} prof_unwind_data_t;
#endif
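/*
 * Illustrative only: a sketch of how an _Unwind_Backtrace() callback might
 * append one frame per invocation into the prof_bt_t referenced by
 * prof_unwind_data_t, stopping once max frames have been recorded.  The
 * callback name is hypothetical; the real callback lives in prof.c.
 *
 *	static _Unwind_Reason_Code
 *	unwind_cb(struct _Unwind_Context *context, void *arg) {
 *		prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
 *		void *ip = (void *)_Unwind_GetIP(context);
 *
 *		if (ip == NULL)
 *			return _URC_END_OF_STACK;
 *		data->bt->vec[data->bt->len] = ip;
 *		if (++data->bt->len == data->max)
 *			return _URC_END_OF_STACK;
 *		return _URC_NO_REASON;
 *	}
 */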

struct prof_accum_s {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_t	mtx;
	uint64_t	accumbytes;
#else
	atomic_u64_t	accumbytes;
#endif
};
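/*
 * Illustrative only: a sketch of how accumbytes might be advanced under the
 * two configurations above (mutex-protected plain counter vs. lock-free
 * 64-bit atomic).  The helper name is hypothetical.
 *
 *	static void
 *	accum_add(tsdn_t *tsdn, prof_accum_t *accum, uint64_t nbytes) {
 *	#ifndef JEMALLOC_ATOMIC_U64
 *		malloc_mutex_lock(tsdn, &accum->mtx);
 *		accum->accumbytes += nbytes;
 *		malloc_mutex_unlock(tsdn, &accum->mtx);
 *	#else
 *		atomic_fetch_add_u64(&accum->accumbytes, nbytes,
 *		    ATOMIC_RELAXED);
 *	#endif
 *	}
 */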

struct prof_cnt_s {
	/* Profiling counters. */
	uint64_t	curobjs;
	uint64_t	curbytes;
	uint64_t	accumobjs;
	uint64_t	accumbytes;
};
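/*
 * Illustrative only: the cur* counters track live sampled objects/bytes,
 * while the accum* counters are monotonic lifetime totals.  A sketch of the
 * bookkeeping, with hypothetical helper names:
 *
 *	static void
 *	cnt_alloc(prof_cnt_t *cnts, size_t usize) {
 *		cnts->curobjs++;
 *		cnts->curbytes += usize;
 *		cnts->accumobjs++;
 *		cnts->accumbytes += usize;
 *	}
 *
 *	static void
 *	cnt_dalloc(prof_cnt_t *cnts, size_t usize) {
 *		cnts->curobjs--;
 *		cnts->curbytes -= usize;
 *	}
 */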

typedef enum {
	prof_tctx_state_initializing,
	prof_tctx_state_nominal,
	prof_tctx_state_dumping,
	prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;

struct prof_tctx_s {
	/* Thread data for thread that performed the allocation. */
	prof_tdata_t		*tdata;

	/*
	 * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
	 * defunct during teardown.
	 */
	uint64_t		thr_uid;
	uint64_t		thr_discrim;

	/* Profiling counters, protected by tdata->lock. */
	prof_cnt_t		cnts;

	/* Associated global context. */
	prof_gctx_t		*gctx;

	/*
	 * UID that distinguishes multiple tctx's created by the same thread,
	 * but coexisting in gctx->tctxs.  There are two ways that such
	 * coexistence can occur:
	 * - A dumper thread can cause a tctx to be retained in the purgatory
	 *   state.
	 * - Although a single "producer" thread must create all tctx's which
	 *   share the same thr_uid, multiple "consumers" can each concurrently
	 *   execute portions of prof_tctx_destroy().  prof_tctx_destroy() only
	 *   gets called once each time cnts.cur{objs,bytes} drop to 0, but this
	 *   threshold can be hit again before the first consumer finishes
	 *   executing prof_tctx_destroy().
	 */
	uint64_t		tctx_uid;

	/* Linkage into gctx's tctxs. */
	rb_node(prof_tctx_t)	tctx_link;

	/*
	 * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
	 * sample vs destroy race.
	 */
	bool			prepared;

	/* Current dump-related state, protected by gctx->lock. */
	prof_tctx_state_t	state;

	/*
	 * Copy of cnts snapshotted during early dump phase, protected by
	 * dump_mtx.
	 */
	prof_cnt_t		dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
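/*
 * Illustrative only: within gctx->tctxs, tctx's are kept in an order that
 * groups entries from the same thread together.  A sketch of a comparator
 * ordering by (thr_uid, thr_discrim, tctx_uid); the actual comparator is
 * defined in prof.c and may differ in detail.
 *
 *	static int
 *	tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
 *		int ret = (a->thr_uid > b->thr_uid) -
 *		    (a->thr_uid < b->thr_uid);
 *		if (ret == 0) {
 *			ret = (a->thr_discrim > b->thr_discrim) -
 *			    (a->thr_discrim < b->thr_discrim);
 *			if (ret == 0) {
 *				ret = (a->tctx_uid > b->tctx_uid) -
 *				    (a->tctx_uid < b->tctx_uid);
 *			}
 *		}
 *		return ret;
 *	}
 */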

struct prof_gctx_s {
	/* Protects nlimbo, cnt_summed, and tctxs. */
	malloc_mutex_t		*lock;

	/*
	 * Number of threads that currently cause this gctx to be in a state of
	 * limbo due to one of:
	 *   - Initializing this gctx.
	 *   - Initializing per thread counters associated with this gctx.
	 *   - Preparing to destroy this gctx.
	 *   - Dumping a heap profile that includes this gctx.
	 * nlimbo must be 1 (single destroyer) in order to safely destroy the
	 * gctx.
	 */
	unsigned		nlimbo;

	/*
	 * Tree of profile counters, one for each thread that has allocated in
	 * this context.
	 */
	prof_tctx_tree_t	tctxs;

	/* Linkage for tree of contexts to be dumped. */
	rb_node(prof_gctx_t)	dump_link;

	/* Temporary storage for summation during dump. */
	prof_cnt_t		cnt_summed;

	/* Associated backtrace. */
	prof_bt_t		bt;

	/* Backtrace vector, variable size, referred to by bt. */
	void			*vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
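/*
 * Illustrative only: vec[1] is a pre-C99 flexible-array idiom, so a gctx is
 * allocated with enough trailing space for the whole backtrace and bt.vec is
 * then pointed at that storage.  A sketch of the sizing arithmetic (the
 * actual allocation path lives in prof.c):
 *
 *	size_t size = offsetof(prof_gctx_t, vec) + bt->len * sizeof(void *);
 *	// ...allocate size bytes for gctx, then:
 *	// memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
 *	// gctx->bt.vec = gctx->vec;
 *	// gctx->bt.len = bt->len;
 */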

struct prof_tdata_s {
	malloc_mutex_t		*lock;

	/* Monotonically increasing unique thread identifier. */
	uint64_t		thr_uid;

	/*
	 * Monotonically increasing discriminator among tdata structures
	 * associated with the same thr_uid.
	 */
	uint64_t		thr_discrim;

	/* Included in heap profile dumps if non-NULL. */
	char			*thread_name;

	bool			attached;
	bool			expired;

	rb_node(prof_tdata_t)	tdata_link;

	/*
	 * Counter used to initialize prof_tctx_t's tctx_uid.  No locking is
	 * necessary when incrementing this field, because only one thread ever
	 * does so.
	 */
	uint64_t		tctx_uid_next;

	/*
	 * Hash of (prof_bt_t *)-->(prof_tctx_t *).  Each thread tracks
	 * backtraces for which it has non-zero allocation/deallocation counters
	 * associated with thread-specific prof_tctx_t objects.  Other threads
	 * may write to prof_tctx_t contents when freeing associated objects.
	 */
	ckh_t			bt2tctx;

	/* Sampling state. */
	uint64_t		prng_state;
	uint64_t		bytes_until_sample;

	/* State used to avoid dumping while operating on prof internals. */
	bool			enq;
	bool			enq_idump;
	bool			enq_gdump;

	/*
	 * Set to true during an early dump phase for tdata's which are
	 * currently being dumped.  New threads' tdata's have this initialized
	 * to false so that they aren't accidentally included in later dump
	 * phases.
	 */
	bool			dumping;

	/*
	 * True if profiling is active for this tdata's thread
	 * (thread.prof.active mallctl).
	 */
	bool			active;

	/* Temporary storage for summation during dump. */
	prof_cnt_t		cnt_summed;

	/* Backtrace vector, used for calls to prof_backtrace(). */
	void			*vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
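/*
 * Illustrative only: prng_state and bytes_until_sample drive byte-based
 * allocation sampling.  Each allocation consumes bytes_until_sample; when the
 * budget is exhausted the allocation is sampled and a new pseudo-randomly
 * drawn interval (averaging 2^lg_prof_sample bytes) is installed.  A
 * simplified sketch with hypothetical helper names:
 *
 *	static bool
 *	sample_check(prof_tdata_t *tdata, size_t usize) {
 *		if (usize < tdata->bytes_until_sample) {
 *			tdata->bytes_until_sample -= usize;
 *			return false;	// Not sampled.
 *		}
 *		// Sampled: draw the next interval from the PRNG.
 *		tdata->bytes_until_sample = next_interval(&tdata->prng_state);
 *		return true;
 *	}
 */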

#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */