#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/spin.h"

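/*
 * Fallback spin count for InitializeCriticalSectionAndSpinCount() on
 * Windows builds that predate SRW locks, in case the CRT headers do not
 * define it.
 */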
#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif

/******************************************************************************/
/* Data. */

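/*
 * With lazy locking, isthreaded stays false until the first
 * pthread_create() call, so mutex operations can be short-circuited while
 * the process is still single-threaded.
 */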
#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
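/*
 * On systems that initialize mutexes through a calloc callback
 * (JEMALLOC_MUTEX_INIT_CB), mutexes created before the callback is safe to
 * use are queued on postponed_mutexes and initialized later by
 * malloc_mutex_first_thread().
 */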
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif

/******************************************************************************/
/*
 * We intercept pthread_create() calls in order to toggle isthreaded if the
 * process goes multi-threaded.
 */

#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
    void *__restrict arg) {
	return pthread_create_wrapper(thread, attr, start_routine, arg);
}
#endif

/******************************************************************************/
#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t));

#pragma weak _pthread_mutex_init_calloc_cb
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t)) {
	return (((int (*)(pthread_mutex_t *, void *(*)(size_t, size_t)))
	    __libc_interposing[INTERPOS__pthread_mutex_init_calloc_cb])(mutex,
	    calloc_cb));
}
#endif

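/*
 * Slow path taken once the inline fast path fails to acquire the lock.
 * On multi-processor systems we first spin up to MALLOC_MUTEX_MAX_SPIN
 * iterations, retrying whenever the lock appears unheld; if that fails
 * (or on a single CPU, where spinning cannot help), we block on the
 * underlying lock, recording wait time and contention statistics when
 * config_stats is enabled.
 */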
void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
	mutex_prof_data_t *data = &mutex->prof_data;
	nstime_t before = NSTIME_ZERO_INITIALIZER;

	if (ncpus == 1) {
		goto label_spin_done;
	}

	int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
	do {
		spin_cpu_spinwait();
		if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED)
		    && !malloc_mutex_trylock_final(mutex)) {
			data->n_spin_acquired++;
			return;
		}
	} while (cnt++ < max_cnt);

	if (!config_stats) {
		/* Only spinning is useful when stats are off. */
		malloc_mutex_lock_final(mutex);
		return;
	}
label_spin_done:
	nstime_update(&before);
	/* Copy before to after to avoid clock skew. */
	nstime_t after;
	nstime_copy(&after, &before);
	uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
	    ATOMIC_RELAXED) + 1;
	/* One last try, as the above two calls may take quite some cycles. */
	if (!malloc_mutex_trylock_final(mutex)) {
		atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
		data->n_spin_acquired++;
		return;
	}

	/* True slow path. */
	malloc_mutex_lock_final(mutex);
	/* Update remaining slow-path-only counters. */
	atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
	nstime_update(&after);

	nstime_t delta;
	nstime_copy(&delta, &after);
	nstime_subtract(&delta, &before);

	data->n_wait_times++;
	nstime_add(&data->tot_wait_time, &delta);
	if (nstime_compare(&data->max_wait_time, &delta) < 0) {
		nstime_copy(&data->max_wait_time, &delta);
	}
	if (n_thds > data->max_n_thds) {
		data->max_n_thds = n_thds;
	}
}

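/*
 * Reset all profiling counters and timestamps.  The memset already zeroes
 * every field; the explicit nstime_init() calls and prev_owner store
 * restate the important resets for clarity.
 */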
static void
mutex_prof_data_init(mutex_prof_data_t *data) {
	memset(data, 0, sizeof(mutex_prof_data_t));
	nstime_init(&data->max_wait_time, 0);
	nstime_init(&data->tot_wait_time, 0);
	data->prev_owner = NULL;
}

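/*
 * Public wrapper for resetting a mutex's profiling data; the caller must
 * hold the mutex, which the owner assertion enforces in debug builds.
 */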
void
malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_assert_owner(tsdn, mutex);
	mutex_prof_data_init(&mutex->prof_data);
}

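/*
 * Witness comparison callback for address-ordered mutexes: ordering
 * acquisitions by mutex address yields a consistent global lock order,
 * which avoids deadlock when multiple mutexes of equal rank are held.
 * The witness arguments are unused; only the opaque mutex pointers matter.
 */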
static int
mutex_addr_comp(const witness_t *witness1, void *mutex1,
    const witness_t *witness2, void *mutex2) {
	assert(mutex1 != NULL);
	assert(mutex2 != NULL);
	uintptr_t mu1int = (uintptr_t)mutex1;
	uintptr_t mu2int = (uintptr_t)mutex2;
	if (mu1int < mu2int) {
		return -1;
	} else if (mu1int == mu2int) {
		return 0;
	} else {
		return 1;
	}
}

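/*
 * Performs the mutex initialization that was postponed during early
 * bootstrap on systems using the mutex-init callback; elsewhere this is a
 * no-op.
 */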
bool
malloc_mutex_first_thread(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	postpone_init = false;
	while (postponed_mutexes != NULL) {
		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
		postponed_mutexes = postponed_mutexes->postponed_next;
	}
#endif
	return false;
}

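/*
 * malloc_mutex_init() selects a platform-specific lock implementation: an
 * SRW lock on Windows Vista and later, a spinning critical section on
 * older Windows, os_unfair_lock on Darwin, a postponed libc callback on
 * systems with JEMALLOC_MUTEX_INIT_CB, and a plain pthread mutex
 * elsewhere.  Under config_debug the mutex is also registered with the
 * witness machinery so lock-order violations can be detected at runtime.
 *
 * Typical usage, as a sketch (WITNESS_RANK_EXAMPLE is a placeholder rank,
 * not one defined in this codebase; tsdn is assumed to be in scope):
 *
 *	static malloc_mutex_t example_mtx;
 *	if (malloc_mutex_init(&example_mtx, "example",
 *	    WITNESS_RANK_EXAMPLE, malloc_mutex_rank_exclusive)) {
 *		// Initialization failed; true signals the error.
 *	}
 *	malloc_mutex_lock(tsdn, &example_mtx);
 *	// ... critical section ...
 *	malloc_mutex_unlock(tsdn, &example_mtx);
 */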
bool
malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
	mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
	InitializeSRWLock(&mutex->lock);
#  else
	if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
	    _CRT_SPINCOUNT)) {
		return true;
	}
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
	mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
	if (postpone_init) {
		mutex->postponed_next = postponed_mutexes;
		postponed_mutexes = mutex;
	} else {
		if (_pthread_mutex_init_calloc_cb(&mutex->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
	}
#else
	pthread_mutexattr_t attr;

	if (pthread_mutexattr_init(&attr) != 0) {
		return true;
	}
	pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
	if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
		pthread_mutexattr_destroy(&attr);
		return true;
	}
	pthread_mutexattr_destroy(&attr);
#endif
	if (config_debug) {
		mutex->lock_order = lock_order;
		if (lock_order == malloc_mutex_address_ordered) {
			witness_init(&mutex->witness, name, rank,
			    mutex_addr_comp, mutex);
		} else {
			witness_init(&mutex->witness, name, rank, NULL, NULL);
		}
	}
	return false;
}

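/*
 * fork(2) integration: the prefork handler acquires each mutex so that no
 * lock is held mid-operation at fork time.  The parent simply unlocks
 * afterward, while the child either unlocks (when the init-callback scheme
 * guarantees a usable lock) or re-initializes the mutex outright, since
 * the child inherits only the forking thread.
 */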
void
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_lock(tsdn, mutex);
}

void
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_unlock(tsdn, mutex);
}

void
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	malloc_mutex_unlock(tsdn, mutex);
#else
	if (malloc_mutex_init(mutex, mutex->witness.name,
	    mutex->witness.rank, mutex->lock_order)) {
		malloc_printf("<jemalloc>: Error re-initializing mutex in "
		    "child\n");
		if (opt_abort) {
			abort();
		}
	}
#endif
}

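/*
 * Boot-time hook: on systems without the mutex-init callback, defer to
 * malloc_mutex_first_thread(); where the callback exists, the postponed
 * mutexes are handled by malloc_mutex_first_thread() itself and there is
 * nothing left to do here.
 */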
bool
malloc_mutex_boot(void) {
#ifndef JEMALLOC_MUTEX_INIT_CB
	return malloc_mutex_first_thread();
#else
	return false;
#endif
}