#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"

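/*
 * Spin count for Windows critical sections (used by malloc_mutex_init()
 * on pre-Vista targets); define a fallback if the CRT headers do not.
 */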
#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
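/*
 * With JEMALLOC_MUTEX_INIT_CB (FreeBSD), mutexes created during early
 * bootstrap cannot be initialized through the calloc callback yet; they
 * are queued on postponed_mutexes and initialized by malloc_mutex_boot()
 * once bootstrap_calloc is usable.
 */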
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool		postpone_init = true;
static malloc_mutex_t	*postponed_mutexes = NULL;
#endif

/******************************************************************************/
/*
 * We intercept pthread_create() calls in order to toggle isthreaded if the
 * process goes multi-threaded.
 */

#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
    void *__restrict arg) {
	return pthread_create_wrapper(thread, attr, start_routine, arg);
}
#endif

/******************************************************************************/

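/*
 * FreeBSD builds jemalloc into libc. pthread mutexes there are normally
 * initialized through a calloc callback so that the threading library can
 * allocate mutex state without calling back into an allocator that is not
 * fully bootstrapped yet; the weak definition below simply forwards to the
 * interposed implementation via libc's interposing table.
 */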
#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t));

#pragma weak _pthread_mutex_init_calloc_cb
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{

	return (((int (*)(pthread_mutex_t *, void *(*)(size_t, size_t)))
	    __libc_interposing[INTERPOS__pthread_mutex_init_calloc_cb])(mutex,
	    calloc_cb));
}
#endif

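/*
 * Slow path for malloc_mutex_lock(): after a failed trylock, spin up to
 * MALLOC_MUTEX_MAX_SPIN iterations hoping the holder drops the lock
 * (skipped on uniprocessors, where spinning cannot help), then fall back
 * to a blocking acquire. With stats enabled, also record how long the
 * thread waited and the peak number of waiters.
 */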
void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
	mutex_prof_data_t *data = &mutex->prof_data;
	UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER;

	if (ncpus == 1) {
		/* Spinning cannot help on a single CPU. */
		goto label_spin_done;
	}

	int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
	do {
		CPU_SPINWAIT;
		if (!malloc_mutex_trylock_final(mutex)) {
			data->n_spin_acquired++;
			return;
		}
	} while (cnt++ < max_cnt);

	if (!config_stats) {
		/* With stats off there is nothing to record; block now. */
		malloc_mutex_lock_final(mutex);
		return;
	}
label_spin_done:
	nstime_update(&before);
	/* Seed after from before so the delta stays sane on clock skew. */
	nstime_t after;
	nstime_copy(&after, &before);
	uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
	    ATOMIC_RELAXED) + 1;
	/* One last try, since the two calls above may take quite a few cycles. */
	if (!malloc_mutex_trylock_final(mutex)) {
		atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
		data->n_spin_acquired++;
		return;
	}

	/* True slow path. */
	malloc_mutex_lock_final(mutex);
	/* Update the remaining slow-path-only counters. */
	atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
	nstime_update(&after);

	nstime_t delta;
	nstime_copy(&delta, &after);
	nstime_subtract(&delta, &before);

	data->n_wait_times++;
	nstime_add(&data->tot_wait_time, &delta);
	if (nstime_compare(&data->max_wait_time, &delta) < 0) {
		nstime_copy(&data->max_wait_time, &delta);
	}
	if (n_thds > data->max_n_thds) {
		data->max_n_thds = n_thds;
	}
}

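/* Reset a mutex's profiling counters to a pristine state. */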
static void
mutex_prof_data_init(mutex_prof_data_t *data) {
	memset(data, 0, sizeof(mutex_prof_data_t));
	nstime_init(&data->max_wait_time, 0);
	nstime_init(&data->tot_wait_time, 0);
	data->prev_owner = NULL;
}

void
malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_assert_owner(tsdn, mutex);
	mutex_prof_data_init(&mutex->prof_data);
}

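/*
 * Witness comparison callback used for address-ordered mutexes: when two
 * locks share a rank, acquiring them in address order keeps the lock order
 * consistent across threads and avoids deadlock. Only the mutex addresses
 * matter; the witness arguments are unused.
 */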
static int
mutex_addr_comp(const witness_t *witness1, void *mutex1,
    const witness_t *witness2, void *mutex2) {
	assert(mutex1 != NULL);
	assert(mutex2 != NULL);
	uintptr_t mu1int = (uintptr_t)mutex1;
	uintptr_t mu2int = (uintptr_t)mutex2;
	if (mu1int < mu2int) {
		return -1;
	} else if (mu1int == mu2int) {
		return 0;
	} else {
		return 1;
	}
}

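/*
 * Single-thread hook: on FreeBSD, libc is expected to call this (via
 * _malloc_first_thread()) while the process is still single-threaded.
 */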
bool
malloc_mutex_first_thread(void) {

#ifndef JEMALLOC_MUTEX_INIT_CB
	/*
	 * Without the init callback there are no postponed mutexes to
	 * flush; malloc_mutex_boot() reduces to a no-op, so defer to it.
	 */
	return (malloc_mutex_boot());
#else
	return (false);
#endif
}

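/*
 * Initialize a mutex with whatever primitive the platform provides: SRW
 * locks or critical sections on Windows, os_unfair_lock or OSSpinLock on
 * Darwin, the postponed/callback scheme on FreeBSD, and a plain pthread
 * mutex elsewhere. Returns true on failure, false on success.
 *
 * Illustrative usage (a sketch, not code from this file; the rank and
 * order constants are the ones defined in the mutex/witness headers):
 *
 *	static malloc_mutex_t	m;
 *	if (malloc_mutex_init(&m, "example", WITNESS_RANK_OMIT,
 *	    malloc_mutex_rank_exclusive)) {
 *		abort();
 *	}
 *	malloc_mutex_lock(tsdn, &m);
 *	... critical section ...
 *	malloc_mutex_unlock(tsdn, &m);
 */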
bool
malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
	mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
	InitializeSRWLock(&mutex->lock);
#  else
	if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
	    _CRT_SPINCOUNT)) {
		return true;
	}
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
	mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_OSSPIN))
	mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
	if (postpone_init) {
		mutex->postponed_next = postponed_mutexes;
		postponed_mutexes = mutex;
	} else {
		if (_pthread_mutex_init_calloc_cb(&mutex->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
	}
#else
	pthread_mutexattr_t attr;

	if (pthread_mutexattr_init(&attr) != 0) {
		return true;
	}
	pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
	if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
		pthread_mutexattr_destroy(&attr);
		return true;
	}
	pthread_mutexattr_destroy(&attr);
#endif
	if (config_debug) {
		mutex->lock_order = lock_order;
		if (lock_order == malloc_mutex_address_ordered) {
			witness_init(&mutex->witness, name, rank,
			    mutex_addr_comp, mutex);
		} else {
			witness_init(&mutex->witness, name, rank, NULL, NULL);
		}
	}
	return false;
}

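/*
 * Fork protocol: prefork handlers acquire every mutex so that fork()
 * cannot snapshot a lock in a half-updated critical section; the parent
 * simply unlocks afterward, while the child either unlocks (FreeBSD) or
 * re-initializes each mutex from scratch.
 */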
void
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_lock(tsdn, mutex);
}

void
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_unlock(tsdn, mutex);
}

void
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	malloc_mutex_unlock(tsdn, mutex);
#else
	if (malloc_mutex_init(mutex, mutex->witness.name,
	    mutex->witness.rank, mutex->lock_order)) {
		malloc_printf("<jemalloc>: Error re-initializing mutex in "
		    "child\n");
		if (opt_abort) {
			abort();
		}
	}
#endif
}

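/*
 * Late mutex bootstrap: initialize every postponed mutex now that
 * bootstrap_calloc can safely be handed to the init callback. A no-op on
 * configurations that initialize mutexes eagerly.
 */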
bool
malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	postpone_init = false;
	while (postponed_mutexes != NULL) {
		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
		postponed_mutexes = postponed_mutexes->postponed_next;
	}
#endif
	return false;
}
245