/*
 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifndef EVENT__DISABLE_THREAD_SUPPORT

#include "event2/thread.h"

#include <stdlib.h>
#include <string.h>

#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"

#ifdef EVTHREAD_EXPOSE_STRUCTS
#define GLOBAL
#else
#define GLOBAL static
#endif
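
/* When EVTHREAD_EXPOSE_STRUCTS is defined, the callback tables below are
 * visible to the rest of the library directly; otherwise they stay static
 * and are reached only through the evthreadimpl_*_() accessor functions at
 * the bottom of this file. */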

/* globals */
GLOBAL int evthread_lock_debugging_enabled_ = 0;
GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

/* Used for debugging */
static struct evthread_lock_callbacks original_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks original_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

void
evthread_set_id_callback(unsigned long (*id_fn)(void))
{
	evthread_id_fn_ = id_fn;
}

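/*
 * Install the lock callbacks that libevent uses to protect its internal
 * structures.  A NULL argument clears the callbacks (with a warning if any
 * were already installed), passing an identical set again is a no-op, and
 * replacing an installed, different set fails with -1.  When lock debugging
 * is enabled the new callbacks go into original_lock_fns_, so the debug
 * wrappers keep intercepting every lock operation.
 *
 * Illustrative sketch only (not part of this file): applications normally
 * just call evthread_use_pthreads() or evthread_use_windows_threads(), but
 * filling the structure by hand for pthreads would look roughly like this,
 * with the sketch_* helpers being hypothetical names:
 *
 *	static void *sketch_lock_alloc(unsigned locktype)
 *	{
 *		pthread_mutex_t *m = malloc(sizeof(*m));
 *		pthread_mutexattr_t attr;
 *		if (!m)
 *			return NULL;
 *		pthread_mutexattr_init(&attr);
 *		pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *		pthread_mutex_init(m, &attr);
 *		pthread_mutexattr_destroy(&attr);
 *		return m;
 *	}
 *	static void sketch_lock_free(void *lock, unsigned locktype)
 *	{
 *		pthread_mutex_destroy(lock);
 *		free(lock);
 *	}
 *	static int sketch_lock_lock(unsigned mode, void *lock)
 *	{
 *		return pthread_mutex_lock(lock);
 *	}
 *	static int sketch_lock_unlock(unsigned mode, void *lock)
 *	{
 *		return pthread_mutex_unlock(lock);
 *	}
 *
 *	struct evthread_lock_callbacks cbs = {
 *		EVTHREAD_LOCK_API_VERSION,
 *		EVTHREAD_LOCKTYPE_RECURSIVE,
 *		sketch_lock_alloc, sketch_lock_free,
 *		sketch_lock_lock, sketch_lock_unlock
 *	};
 *	evthread_set_lock_callbacks(&cbs);
 */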
int
evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
{
	struct evthread_lock_callbacks *target =
	    evthread_lock_debugging_enabled_
	    ? &original_lock_fns_ : &evthread_lock_fns_;

	if (!cbs) {
		if (target->alloc)
			event_warnx("Trying to disable lock functions after "
			    "they have been set up will probably not work.");
		memset(target, 0, sizeof(evthread_lock_fns_));
		return 0;
	}
	if (target->alloc) {
		/* Uh oh; we already had locking callbacks set up.*/
		if (target->lock_api_version == cbs->lock_api_version &&
			target->supported_locktypes == cbs->supported_locktypes &&
			target->alloc == cbs->alloc &&
			target->free == cbs->free &&
			target->lock == cbs->lock &&
			target->unlock == cbs->unlock) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change lock callbacks once they have been "
		    "initialized.");
		return -1;
	}
	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
		memcpy(target, cbs, sizeof(evthread_lock_fns_));
		return event_global_setup_locks_(1);
	} else {
		return -1;
	}
}

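/*
 * Install the condition-variable callbacks.  The contract mirrors
 * evthread_set_lock_callbacks(): NULL clears the callbacks, an identical set
 * is accepted silently, and a different set is refused once one has been
 * installed.  When lock debugging is on, alloc/free/signal are also copied
 * into the active table, but wait_condition keeps pointing at
 * debug_cond_wait() so the debug bookkeeping around each wait is preserved.
 */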
int
evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
{
	struct evthread_condition_callbacks *target =
	    evthread_lock_debugging_enabled_
	    ? &original_cond_fns_ : &evthread_cond_fns_;

	if (!cbs) {
		if (target->alloc_condition)
			event_warnx("Trying to disable condition functions "
			    "after they have been set up will probably not "
			    "work.");
		memset(target, 0, sizeof(evthread_cond_fns_));
		return 0;
	}
	if (target->alloc_condition) {
		/* Uh oh; we already had condition callbacks set up.*/
		if (target->condition_api_version == cbs->condition_api_version &&
			target->alloc_condition == cbs->alloc_condition &&
			target->free_condition == cbs->free_condition &&
			target->signal_condition == cbs->signal_condition &&
			target->wait_condition == cbs->wait_condition) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change condition callbacks once they "
		    "have been initialized.");
		return -1;
	}
	if (cbs->alloc_condition && cbs->free_condition &&
	    cbs->signal_condition && cbs->wait_condition) {
		memcpy(target, cbs, sizeof(evthread_cond_fns_));
	}
	if (evthread_lock_debugging_enabled_) {
		evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
		evthread_cond_fns_.free_condition = cbs->free_condition;
		evthread_cond_fns_.signal_condition = cbs->signal_condition;
	}
	return 0;
}

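/*
 * Lock-debugging machinery.  When evthread_enable_lock_debugging() is in
 * effect, every lock libevent hands out is really a struct debug_lock that
 * wraps the user's lock (if any) and tracks a signature, the lock type, the
 * current hold count, and the id of the holding thread.  The debug_lock_*
 * functions assert the expected invariants: non-recursive locks are never
 * acquired twice, read/write modes are used only on read-write locks, and a
 * lock is unlocked only by the thread that holds it.
 */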
#define DEBUG_LOCK_SIG	0xdeb0b10c

struct debug_lock {
	unsigned signature;
	unsigned locktype;
	unsigned long held_by;
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;
	void *lock;
};

static void *
debug_lock_alloc(unsigned locktype)
{
	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
	if (!result)
		return NULL;
	if (original_lock_fns_.alloc) {
		if (!(result->lock = original_lock_fns_.alloc(
				locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
			mm_free(result);
			return NULL;
		}
	} else {
		result->lock = NULL;
	}
	result->signature = DEBUG_LOCK_SIG;
	result->locktype = locktype;
	result->count = 0;
	result->held_by = 0;
	return result;
}

static void
debug_lock_free(void *lock_, unsigned locktype)
{
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock->count == 0);
	EVUTIL_ASSERT(locktype == lock->locktype);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	if (original_lock_fns_.free) {
		original_lock_fns_.free(lock->lock,
		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
	}
	lock->lock = NULL;
	lock->count = -100;
	lock->signature = 0x12300fda;
	mm_free(lock);
}

static void
evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
{
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	++lock->count;
	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
		EVUTIL_ASSERT(lock->count == 1);
	if (evthread_id_fn_) {
		unsigned long me;
		me = evthread_id_fn_();
		if (lock->count > 1)
			EVUTIL_ASSERT(lock->held_by == me);
		lock->held_by = me;
	}
}

static int
debug_lock_lock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (original_lock_fns_.lock)
		res = original_lock_fns_.lock(mode, lock->lock);
	if (!res) {
		evthread_debug_lock_mark_locked(mode, lock);
	}
	return res;
}

static void
evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
{
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (evthread_id_fn_) {
		unsigned long me;
		me = evthread_id_fn_();
		EVUTIL_ASSERT(lock->held_by == me);
		if (lock->count == 1)
			lock->held_by = 0;
	}
	--lock->count;
	EVUTIL_ASSERT(lock->count >= 0);
}

static int
debug_lock_unlock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	evthread_debug_lock_mark_unlocked(mode, lock);
	if (original_lock_fns_.unlock)
		res = original_lock_fns_.unlock(mode, lock->lock);
	return res;
}

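/* Waiting on a condition releases the lock while we sleep, so update the
 * debug bookkeeping as if the wrapper had been unlocked, pass the *real*
 * lock to the user's wait callback, and mark the wrapper locked again once
 * the wait returns. */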
static int
debug_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
{
	int r;
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	EVLOCK_ASSERT_LOCKED(lock_);
	evthread_debug_lock_mark_unlocked(0, lock);
	r = original_cond_fns_.wait_condition(cond_, lock->lock, tv);
	evthread_debug_lock_mark_locked(0, lock);
	return r;
}

/* misspelled version for backward compatibility */
void
evthread_enable_lock_debuging(void)
{
	evthread_enable_lock_debugging();
}

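/*
 * Turn on lock debugging: save the current lock and condition callbacks and
 * interpose the debug_lock_* wrappers above.  This only works reliably when
 * it is called before any locks exist, i.e. before the first event_base is
 * created.
 *
 * Illustrative sketch only (not from this file) of a typical debug setup:
 *
 *	evthread_use_pthreads();
 *	evthread_enable_lock_debugging();
 *	struct event_base *base = event_base_new();
 */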
void
evthread_enable_lock_debugging(void)
{
	struct evthread_lock_callbacks cbs = {
		EVTHREAD_LOCK_API_VERSION,
		EVTHREAD_LOCKTYPE_RECURSIVE,
		debug_lock_alloc,
		debug_lock_free,
		debug_lock_lock,
		debug_lock_unlock
	};
	if (evthread_lock_debugging_enabled_)
		return;
	memcpy(&original_lock_fns_, &evthread_lock_fns_,
	    sizeof(struct evthread_lock_callbacks));
	memcpy(&evthread_lock_fns_, &cbs,
	    sizeof(struct evthread_lock_callbacks));

	memcpy(&original_cond_fns_, &evthread_cond_fns_,
	    sizeof(struct evthread_condition_callbacks));
	evthread_cond_fns_.wait_condition = debug_cond_wait;
	evthread_lock_debugging_enabled_ = 1;

	/* XXX return value should get checked. */
	event_global_setup_locks_(0);
}

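/* Return true if the given debug lock is currently held -- and, when a
 * thread-id callback is installed, held by the calling thread.  Checks such
 * as EVLOCK_ASSERT_LOCKED() rely on this when debugging is enabled. */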
int
evthread_is_debug_lock_held_(void *lock_)
{
	struct debug_lock *lock = lock_;
	if (! lock->count)
		return 0;
	if (evthread_id_fn_) {
		unsigned long me = evthread_id_fn_();
		if (lock->held_by != me)
			return 0;
	}
	return 1;
}

void *
evthread_debug_get_real_lock_(void *lock_)
{
	struct debug_lock *lock = lock_;
	return lock->lock;
}

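/*
 * (Re)initialize one of libevent's global locks after the threading
 * configuration changes (see the event_global_setup_locks_() calls above).
 * enable_locks says whether real locking is being switched on; the existing
 * lock_ (possibly NULL) is wrapped, replaced, or filled in according to the
 * four cases described below.
 */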
void *
evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
{
	/* there are four cases here:
	   1) we're turning on debugging; locking is not on.
	   2) we're turning on debugging; locking is on.
	   3) we're turning on locking; debugging is not on.
	   4) we're turning on locking; debugging is on. */

	if (!enable_locks && original_lock_fns_.alloc == NULL) {
		/* Case 1: allocate a debug lock. */
		EVUTIL_ASSERT(lock_ == NULL);
		return debug_lock_alloc(locktype);
	} else if (!enable_locks && original_lock_fns_.alloc != NULL) {
		/* Case 2: wrap the lock in a debug lock. */
		struct debug_lock *lock;
		EVUTIL_ASSERT(lock_ != NULL);

		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
			/* We can't wrap it: We need a recursive lock */
			original_lock_fns_.free(lock_, locktype);
			return debug_lock_alloc(locktype);
		}
		lock = mm_malloc(sizeof(struct debug_lock));
		if (!lock) {
			original_lock_fns_.free(lock_, locktype);
			return NULL;
		}
		lock->lock = lock_;
		lock->locktype = locktype;
		/* Stamp the signature, as debug_lock_alloc() does; otherwise
		 * the DEBUG_LOCK_SIG asserts would fire on this wrapper. */
		lock->signature = DEBUG_LOCK_SIG;
		lock->count = 0;
		lock->held_by = 0;
		return lock;
	} else if (enable_locks && ! evthread_lock_debugging_enabled_) {
		/* Case 3: allocate a regular lock */
		EVUTIL_ASSERT(lock_ == NULL);
		return evthread_lock_fns_.alloc(locktype);
	} else {
		/* Case 4: Fill in a debug lock with a real lock */
		struct debug_lock *lock = lock_;
		EVUTIL_ASSERT(enable_locks &&
		              evthread_lock_debugging_enabled_);
		EVUTIL_ASSERT(lock->locktype == locktype);
		EVUTIL_ASSERT(lock->lock == NULL);
		lock->lock = original_lock_fns_.alloc(
			locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock->lock) {
			lock->count = -200;
			mm_free(lock);
			return NULL;
		}
		return lock;
	}
}


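/* When the callback tables are not exposed directly (the static GLOBAL case
 * above), the rest of the library goes through these small accessor
 * functions instead of touching the tables themselves. */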
#ifndef EVTHREAD_EXPOSE_STRUCTS
unsigned long
evthreadimpl_get_id_()
{
	return evthread_id_fn_ ? evthread_id_fn_() : 1;
}
void *
evthreadimpl_lock_alloc_(unsigned locktype)
{
	return evthread_lock_fns_.alloc ?
	    evthread_lock_fns_.alloc(locktype) : NULL;
}
void
evthreadimpl_lock_free_(void *lock, unsigned locktype)
{
	if (evthread_lock_fns_.free)
		evthread_lock_fns_.free(lock, locktype);
}
int
evthreadimpl_lock_lock_(unsigned mode, void *lock)
{
	if (evthread_lock_fns_.lock)
		return evthread_lock_fns_.lock(mode, lock);
	else
		return 0;
}
int
evthreadimpl_lock_unlock_(unsigned mode, void *lock)
{
	if (evthread_lock_fns_.unlock)
		return evthread_lock_fns_.unlock(mode, lock);
	else
		return 0;
}
void *
evthreadimpl_cond_alloc_(unsigned condtype)
{
	return evthread_cond_fns_.alloc_condition ?
	    evthread_cond_fns_.alloc_condition(condtype) : NULL;
}
void
evthreadimpl_cond_free_(void *cond)
{
	if (evthread_cond_fns_.free_condition)
		evthread_cond_fns_.free_condition(cond);
}
int
evthreadimpl_cond_signal_(void *cond, int broadcast)
{
	if (evthread_cond_fns_.signal_condition)
		return evthread_cond_fns_.signal_condition(cond, broadcast);
	else
		return 0;
}
int
evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
{
	if (evthread_cond_fns_.wait_condition)
		return evthread_cond_fns_.wait_condition(cond, lock, tv);
	else
		return 0;
}
int
evthreadimpl_is_lock_debugging_enabled_(void)
{
	return evthread_lock_debugging_enabled_;
}

int
evthreadimpl_locking_enabled_(void)
{
	return evthread_lock_fns_.lock != NULL;
}
#endif

#endif