#ifndef JEMALLOC_INTERNAL_MUTEX_H
#define JEMALLOC_INTERNAL_MUTEX_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"

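/*
 * Upper bound on spinning in the lock slow path before falling back to a
 * blocking wait; see malloc_mutex_lock_slow().
 */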
extern int64_t opt_mutex_max_spin;

typedef enum {
	/* Can only acquire one mutex of a given witness rank at a time. */
	malloc_mutex_rank_exclusive,
	/*
	 * Can acquire multiple mutexes of the same witness rank, but in
	 * address-ascending order only.
	 */
	malloc_mutex_address_ordered
} malloc_mutex_lock_order_t;

typedef struct malloc_mutex_s malloc_mutex_t;
struct malloc_mutex_s {
	union {
		struct {
			/*
			 * prof_data is defined first to reduce cacheline
			 * bouncing: the data is not touched by the mutex
			 * holder during unlocking, while it may be modified by
			 * contenders.  Having it before the mutex itself can
			 * avoid prefetching a modified cacheline (for the
			 * unlocking thread).
			 */
			mutex_prof_data_t	prof_data;
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
			SRWLOCK         	lock;
#  else
			CRITICAL_SECTION	lock;
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
			os_unfair_lock		lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
			pthread_mutex_t		lock;
			malloc_mutex_t		*postponed_next;
#else
			pthread_mutex_t		lock;
#endif
			/*
			 * Hint flag to avoid exclusive cache line contention
			 * during spin waiting.
			 */
			atomic_b_t		locked;
		};
		/*
		 * We only touch the witness when configured with debug.
		 * However, we keep the field in a union when !debug so that we
		 * don't have to pollute the code base with #ifdefs, while
		 * avoiding the memory cost.
		 */
#if !defined(JEMALLOC_DEBUG)
		witness_t			witness;
		malloc_mutex_lock_order_t	lock_order;
#endif
	};

#if defined(JEMALLOC_DEBUG)
	witness_t			witness;
	malloc_mutex_lock_order_t	lock_order;
#endif
};

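/*
 * Lock / unlock / trylock on top of the platform primitive.  Note that
 * MALLOC_MUTEX_TRYLOCK() evaluates to true when the lock could *not* be
 * acquired.
 */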
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
#    define MALLOC_MUTEX_LOCK(m)    AcquireSRWLockExclusive(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  ReleaseSRWLockExclusive(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
#  else
#    define MALLOC_MUTEX_LOCK(m)    EnterCriticalSection(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  LeaveCriticalSection(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
#    define MALLOC_MUTEX_LOCK(m)    os_unfair_lock_lock(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  os_unfair_lock_unlock(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
#else
#    define MALLOC_MUTEX_LOCK(m)    pthread_mutex_lock(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  pthread_mutex_unlock(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
#endif

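/*
 * All-zero initializer for mutex_prof_data_t (zero wait times and counters,
 * no previous owner); the field order must match mutex_prof.h.
 */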
#define LOCK_PROF_DATA_INITIALIZER					\
    {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0,		\
	    ATOMIC_INIT(0), 0, NULL, 0}

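/*
 * Static initializer for malloc_mutex_t.  The initializer list must mirror
 * the anonymous union/struct layout above; the witness members are appended
 * only in debug builds.  On Windows it expands to nothing, so mutexes there
 * depend on run-time initialization.
 */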
#ifdef _WIN32
#  define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
#  if defined(JEMALLOC_DEBUG)
#    define MALLOC_MUTEX_INITIALIZER					\
  {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
         WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
#  else
#    define MALLOC_MUTEX_INITIALIZER					\
  {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}},  \
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#  endif
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
#  if (defined(JEMALLOC_DEBUG))
#     define MALLOC_MUTEX_INITIALIZER					\
      {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}},	\
           WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
#  else
#     define MALLOC_MUTEX_INITIALIZER					\
      {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}},	\
           WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#  endif

#else
#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
#  if defined(JEMALLOC_DEBUG)
#    define MALLOC_MUTEX_INITIALIZER					\
     {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
           WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
#  else
#    define MALLOC_MUTEX_INITIALIZER					\
     {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}},	\
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#  endif
#endif

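/*
 * With JEMALLOC_LAZY_LOCK, isthreaded stays false until the process creates
 * its first thread, and the inline lock paths below are skipped while the
 * process remains single-threaded.
 */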
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#endif

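/* Initialization, fork-handling, and stats-reset entry points (mutex.c). */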
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_first_thread(void);
bool malloc_mutex_boot(void);
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);

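/*
 * Slow path taken when the fast-path trylock fails: spins (bounded by
 * opt_mutex_max_spin) before blocking on the underlying lock, and records
 * contention statistics.
 */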
void malloc_mutex_lock_slow(malloc_mutex_t *mutex);

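/* Block until the underlying lock is held, then publish the locked hint. */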
static inline void
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
	MALLOC_MUTEX_LOCK(mutex);
	atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
}

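/* Returns true iff the underlying lock could not be acquired. */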
static inline bool
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
	return MALLOC_MUTEX_TRYLOCK(mutex);
}

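/*
 * Stats-only bookkeeping after an acquisition: count the lock operation and
 * detect owner switches via the acquiring thread's tsdn.
 */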
static inline void
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	if (config_stats) {
		mutex_prof_data_t *data = &mutex->prof_data;
		data->n_lock_ops++;
		if (data->prev_owner != tsdn) {
			data->prev_owner = tsdn;
			data->n_owner_switches++;
		}
	}
}

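/*
 * Illustrative (hypothetical) caller pattern, given the inverted return
 * convention documented below:
 *
 *	if (!malloc_mutex_trylock(tsdn, &mtx)) {
 *		... critical section ...
 *		malloc_mutex_unlock(tsdn, &mtx);
 *	}
 */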
/* Trylock: return false if the lock is successfully acquired. */
static inline bool
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
	if (isthreaded) {
		if (malloc_mutex_trylock_final(mutex)) {
			atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
			return true;
		}
		mutex_owner_stats_update(tsdn, mutex);
	}
	witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);

	return false;
}

/* Aggregate lock prof data. */
static inline void
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
	nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
	if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
		nstime_copy(&sum->max_wait_time, &data->max_wait_time);
	}

	sum->n_wait_times += data->n_wait_times;
	sum->n_spin_acquired += data->n_spin_acquired;

	if (sum->max_n_thds < data->max_n_thds) {
		sum->max_n_thds = data->max_n_thds;
	}
	uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
	    ATOMIC_RELAXED);
	uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
	    &data->n_waiting_thds, ATOMIC_RELAXED);
	atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
	    ATOMIC_RELAXED);
	sum->n_owner_switches += data->n_owner_switches;
	sum->n_lock_ops += data->n_lock_ops;
}

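/*
 * Acquire: try the fast path first; on contention fall back to
 * malloc_mutex_lock_slow(), then update owner stats and witness state.
 */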
static inline void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
	if (isthreaded) {
		if (malloc_mutex_trylock_final(mutex)) {
			malloc_mutex_lock_slow(mutex);
			atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
		}
		mutex_owner_stats_update(tsdn, mutex);
	}
	witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

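/*
 * Release: clear the locked hint and drop witness ownership before releasing
 * the underlying lock.
 */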
static inline void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	atomic_store_b(&mutex->locked, false, ATOMIC_RELAXED);
	witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
	if (isthreaded) {
		MALLOC_MUTEX_UNLOCK(mutex);
	}
}

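/* Ownership assertions, backed by the witness machinery (debug builds). */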
static inline void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

static inline void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

static inline void
malloc_mutex_prof_copy(mutex_prof_data_t *dst, mutex_prof_data_t *source) {
	/*
	 * Not *really* allowed (we shouldn't be doing non-atomic loads of
	 * atomic data), but the mutex protection makes this safe, and writing
	 * a member-for-member copy is tedious for this situation.
	 */
	*dst = *source;
	/* n_waiting_thds is not reported (modified w/o locking). */
	atomic_store_u32(&dst->n_waiting_thds, 0, ATOMIC_RELAXED);
}

/* Copy the prof data from mutex for processing. */
static inline void
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
    malloc_mutex_t *mutex) {
	/* Can only read holding the mutex. */
	malloc_mutex_assert_owner(tsdn, mutex);
	malloc_mutex_prof_copy(data, &mutex->prof_data);
}

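/*
 * Accumulate the mutex's prof data into *data: sums for times and counters,
 * max for the peak values; the caller must hold the mutex.
 */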
static inline void
malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
    malloc_mutex_t *mutex) {
	mutex_prof_data_t *source = &mutex->prof_data;
	/* Can only read holding the mutex. */
	malloc_mutex_assert_owner(tsdn, mutex);

	nstime_add(&data->tot_wait_time, &source->tot_wait_time);
	if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
		nstime_copy(&data->max_wait_time, &source->max_wait_time);
	}
	data->n_wait_times += source->n_wait_times;
	data->n_spin_acquired += source->n_spin_acquired;
	if (data->max_n_thds < source->max_n_thds) {
		data->max_n_thds = source->max_n_thds;
	}
	/* n_waiting_thds is not reported. */
	atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
	data->n_owner_switches += source->n_owner_switches;
	data->n_lock_ops += source->n_lock_ops;
}

/* Compare the prof data and update to the maximum. */
static inline void
malloc_mutex_prof_max_update(tsdn_t *tsdn, mutex_prof_data_t *data,
    malloc_mutex_t *mutex) {
	mutex_prof_data_t *source = &mutex->prof_data;
	/* Can only read holding the mutex. */
	malloc_mutex_assert_owner(tsdn, mutex);

	if (nstime_compare(&source->tot_wait_time, &data->tot_wait_time) > 0) {
		nstime_copy(&data->tot_wait_time, &source->tot_wait_time);
	}
	if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
		nstime_copy(&data->max_wait_time, &source->max_wait_time);
	}
	if (source->n_wait_times > data->n_wait_times) {
		data->n_wait_times = source->n_wait_times;
	}
	if (source->n_spin_acquired > data->n_spin_acquired) {
		data->n_spin_acquired = source->n_spin_acquired;
	}
	if (source->max_n_thds > data->max_n_thds) {
		data->max_n_thds = source->max_n_thds;
	}
	if (source->n_owner_switches > data->n_owner_switches) {
		data->n_owner_switches = source->n_owner_switches;
	}
	if (source->n_lock_ops > data->n_lock_ops) {
		data->n_lock_ops = source->n_lock_ops;
	}
	/* n_waiting_thds is not reported. */
}

#endif /* JEMALLOC_INTERNAL_MUTEX_H */