xref: /freebsd/contrib/jemalloc/src/mutex.c (revision c43cad87172039ccf38172129c79755ea79e6102)
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/spin.h"

#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif

/*
 * Based on benchmark results, a fixed spin with this many retries works well
 * for our critical sections.  A value of -1 removes the limit, so the spin
 * loop runs until the lock is acquired.
 */
int64_t opt_mutex_max_spin = 600;

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
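/*
 * With JEMALLOC_MUTEX_INIT_CB (the FreeBSD libc build), mutexes initialized
 * before malloc_mutex_boot() runs are queued on postponed_mutexes;
 * malloc_mutex_boot() later walks the list and initializes each of them via
 * _pthread_mutex_init_calloc_cb() with bootstrap_calloc.
 */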
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool		postpone_init = true;
static malloc_mutex_t	*postponed_mutexes = NULL;
#endif

/******************************************************************************/
/*
 * We intercept pthread_create() calls in order to toggle isthreaded if the
 * process goes multi-threaded.
 */
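/*
 * pthread_create_wrapper() (defined elsewhere in jemalloc) is expected to
 * set isthreaded before forwarding to the real pthread_create().
 */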

#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
    void *__restrict arg) {
	return pthread_create_wrapper(thread, attr, start_routine, arg);
}
#endif

/******************************************************************************/

#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t));

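/*
 * Provide a weak _pthread_mutex_init_calloc_cb() that forwards to libc's
 * implementation through the __libc_interposing table.
 */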
#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
#pragma weak _pthread_mutex_init_calloc_cb
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{

	return (((int (*)(pthread_mutex_t *, void *(*)(size_t, size_t)))
	    __libc_interposing[INTERPOS__pthread_mutex_init_calloc_cb])(mutex,
	    calloc_cb));
}
#endif
#endif

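/*
 * Slow path for malloc_mutex_lock(): spin up to opt_mutex_max_spin times
 * hoping the holder drops the lock quickly; if that fails, record this
 * thread as a waiter, try once more, and finally block, accumulating
 * wait-time statistics when stats are enabled.
 */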
void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
	mutex_prof_data_t *data = &mutex->prof_data;
	nstime_t before;

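	/*
	 * On a uniprocessor the lock holder cannot run while we spin, so go
	 * straight to the blocking path.
	 */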
	if (ncpus == 1) {
		goto label_spin_done;
	}

	int cnt = 0;
	do {
		spin_cpu_spinwait();
		if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED)
		    && !malloc_mutex_trylock_final(mutex)) {
			data->n_spin_acquired++;
			return;
		}
	} while (cnt++ < opt_mutex_max_spin || opt_mutex_max_spin == -1);

	if (!config_stats) {
		/* With stats off, only the spin phase is useful; just block. */
		malloc_mutex_lock_final(mutex);
		return;
	}
label_spin_done:
	nstime_init_update(&before);
	/* Copy before into after to avoid clock skew. */
	nstime_t after;
	nstime_copy(&after, &before);
	uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
	    ATOMIC_RELAXED) + 1;
	/* One last try, as the above two calls may take quite some cycles. */
	if (!malloc_mutex_trylock_final(mutex)) {
		atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
		data->n_spin_acquired++;
		return;
	}

	/* True slow path. */
	malloc_mutex_lock_final(mutex);
	/* Update more slow-path-only counters. */
	atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
	nstime_update(&after);

	nstime_t delta;
	nstime_copy(&delta, &after);
	nstime_subtract(&delta, &before);

	data->n_wait_times++;
	nstime_add(&data->tot_wait_time, &delta);
	if (nstime_compare(&data->max_wait_time, &delta) < 0) {
		nstime_copy(&data->max_wait_time, &delta);
	}
	if (n_thds > data->max_n_thds) {
		data->max_n_thds = n_thds;
	}
}

static void
mutex_prof_data_init(mutex_prof_data_t *data) {
	memset(data, 0, sizeof(mutex_prof_data_t));
	nstime_init_zero(&data->max_wait_time);
	nstime_init_zero(&data->tot_wait_time);
	data->prev_owner = NULL;
}

void
malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_assert_owner(tsdn, mutex);
	mutex_prof_data_init(&mutex->prof_data);
}

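/*
 * Witness comparison callback used for address-ordered mutexes: ordering by
 * mutex address gives a consistent global lock acquisition order.
 */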
static int
mutex_addr_comp(const witness_t *witness1, void *mutex1,
    const witness_t *witness2, void *mutex2) {
	assert(mutex1 != NULL);
	assert(mutex2 != NULL);
	uintptr_t mu1int = (uintptr_t)mutex1;
	uintptr_t mu2int = (uintptr_t)mutex2;
	if (mu1int < mu2int) {
		return -1;
	} else if (mu1int == mu2int) {
		return 0;
	} else {
		return 1;
	}
}

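/*
 * On FreeBSD, JEMALLOC_MUTEX_INIT_CB is defined, so only the false-returning
 * branch below is compiled.
 */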
bool
malloc_mutex_first_thread(void) {

#ifndef JEMALLOC_MUTEX_INIT_CB
	return (malloc_mutex_first_thread());
#else
	return (false);
#endif
}

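/*
 * Initialize a mutex using the platform-appropriate primitive: an SRWLock or
 * critical section on Windows, an os_unfair_lock on Darwin, a postponed
 * _pthread_mutex_init_calloc_cb() init with JEMALLOC_MUTEX_INIT_CB, and a
 * plain pthread mutex otherwise.  In debug builds the witness is also
 * initialized for lock order checking.
 */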
bool
malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
	mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
	InitializeSRWLock(&mutex->lock);
#  else
	if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
	    _CRT_SPINCOUNT)) {
		return true;
	}
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
	mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
	if (postpone_init) {
		mutex->postponed_next = postponed_mutexes;
		postponed_mutexes = mutex;
	} else {
		if (_pthread_mutex_init_calloc_cb(&mutex->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
	}
#else
	pthread_mutexattr_t attr;

	if (pthread_mutexattr_init(&attr) != 0) {
		return true;
	}
	pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
	if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
		pthread_mutexattr_destroy(&attr);
		return true;
	}
	pthread_mutexattr_destroy(&attr);
#endif
	if (config_debug) {
		mutex->lock_order = lock_order;
		if (lock_order == malloc_mutex_address_ordered) {
			witness_init(&mutex->witness, name, rank,
			    mutex_addr_comp, mutex);
		} else {
			witness_init(&mutex->witness, name, rank, NULL, NULL);
		}
	}
	return false;
}

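/*
 * Fork protocol: the parent acquires every mutex before fork() and releases
 * them afterwards.  In the child, the mutex is either unlocked (with
 * JEMALLOC_MUTEX_INIT_CB) or re-initialized from scratch.
 */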
void
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_lock(tsdn, mutex);
}

void
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_unlock(tsdn, mutex);
}

void
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	malloc_mutex_unlock(tsdn, mutex);
#else
	if (malloc_mutex_init(mutex, mutex->witness.name,
	    mutex->witness.rank, mutex->lock_order)) {
		malloc_printf("<jemalloc>: Error re-initializing mutex in "
		    "child\n");
		if (opt_abort) {
			abort();
		}
	}
#endif
}

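/*
 * Finish mutex bootstrapping: stop postponing and initialize every mutex
 * that was queued on postponed_mutexes while jemalloc was booting.
 */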
bool
malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	postpone_init = false;
	while (postponed_mutexes != NULL) {
		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
		postponed_mutexes = postponed_mutexes->postponed_next;
	}
#endif
	return false;
}