1c43e99fdSEd Maste /*
2c43e99fdSEd Maste * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
3c43e99fdSEd Maste *
4c43e99fdSEd Maste * Redistribution and use in source and binary forms, with or without
5c43e99fdSEd Maste * modification, are permitted provided that the following conditions
6c43e99fdSEd Maste * are met:
7c43e99fdSEd Maste * 1. Redistributions of source code must retain the above copyright
8c43e99fdSEd Maste * notice, this list of conditions and the following disclaimer.
9c43e99fdSEd Maste * 2. Redistributions in binary form must reproduce the above copyright
10c43e99fdSEd Maste * notice, this list of conditions and the following disclaimer in the
11c43e99fdSEd Maste * documentation and/or other materials provided with the distribution.
12c43e99fdSEd Maste * 3. The name of the author may not be used to endorse or promote products
13c43e99fdSEd Maste * derived from this software without specific prior written permission.
14c43e99fdSEd Maste *
15c43e99fdSEd Maste * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16c43e99fdSEd Maste * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17c43e99fdSEd Maste * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18c43e99fdSEd Maste * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19c43e99fdSEd Maste * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20c43e99fdSEd Maste * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21c43e99fdSEd Maste * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22c43e99fdSEd Maste * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23c43e99fdSEd Maste * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24c43e99fdSEd Maste * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25c43e99fdSEd Maste */
26c43e99fdSEd Maste #ifndef EVTHREAD_INTERNAL_H_INCLUDED_
27c43e99fdSEd Maste #define EVTHREAD_INTERNAL_H_INCLUDED_
28c43e99fdSEd Maste
29c43e99fdSEd Maste #ifdef __cplusplus
30c43e99fdSEd Maste extern "C" {
31c43e99fdSEd Maste #endif
32c43e99fdSEd Maste
33c43e99fdSEd Maste #include "event2/event-config.h"
34c43e99fdSEd Maste #include "evconfig-private.h"
35c43e99fdSEd Maste
36c43e99fdSEd Maste #include "event2/thread.h"
37c43e99fdSEd Maste #include "util-internal.h"
38c43e99fdSEd Maste
39c43e99fdSEd Maste struct event_base;
40c43e99fdSEd Maste
41*b50261e2SCy Schubert #if !defined(_WIN32) && !defined(__CYGWIN__)
42c43e99fdSEd Maste /* On Windows, the way we currently make DLLs, it's not allowed for us to
43c43e99fdSEd Maste * have shared global structures. Thus, we only do the direct-call-to-function
44c43e99fdSEd Maste * code path if we know that the local shared library system supports it.
45c43e99fdSEd Maste */
46c43e99fdSEd Maste #define EVTHREAD_EXPOSE_STRUCTS
47c43e99fdSEd Maste #endif
48c43e99fdSEd Maste
49c43e99fdSEd Maste #if ! defined(EVENT__DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS)
50c43e99fdSEd Maste /* Global function pointers to lock-related functions. NULL if locking isn't
51c43e99fdSEd Maste enabled. */
52*b50261e2SCy Schubert EVENT2_EXPORT_SYMBOL
53c43e99fdSEd Maste extern struct evthread_lock_callbacks evthread_lock_fns_;
54*b50261e2SCy Schubert EVENT2_EXPORT_SYMBOL
55c43e99fdSEd Maste extern struct evthread_condition_callbacks evthread_cond_fns_;
56c43e99fdSEd Maste extern unsigned long (*evthread_id_fn_)(void);
57*b50261e2SCy Schubert EVENT2_EXPORT_SYMBOL
58c43e99fdSEd Maste extern int evthread_lock_debugging_enabled_;
59c43e99fdSEd Maste
60c43e99fdSEd Maste /** Return the ID of the current thread, or 1 if threading isn't enabled. */
61c43e99fdSEd Maste #define EVTHREAD_GET_ID() \
62c43e99fdSEd Maste (evthread_id_fn_ ? evthread_id_fn_() : 1)
63c43e99fdSEd Maste
64c43e99fdSEd Maste /** Return true iff we're in the thread that is currently (or most recently)
65c43e99fdSEd Maste * running a given event_base's loop. Requires lock. */
66c43e99fdSEd Maste #define EVBASE_IN_THREAD(base) \
67c43e99fdSEd Maste (evthread_id_fn_ == NULL || \
68c43e99fdSEd Maste (base)->th_owner_id == evthread_id_fn_())
69c43e99fdSEd Maste
70c43e99fdSEd Maste /** Return true iff we need to notify the base's main thread about changes to
71c43e99fdSEd Maste * its state, because it's currently running the main loop in another
72c43e99fdSEd Maste * thread. Requires lock. */
73c43e99fdSEd Maste #define EVBASE_NEED_NOTIFY(base) \
74c43e99fdSEd Maste (evthread_id_fn_ != NULL && \
75c43e99fdSEd Maste (base)->running_loop && \
76c43e99fdSEd Maste (base)->th_owner_id != evthread_id_fn_())
77c43e99fdSEd Maste
78c43e99fdSEd Maste /** Allocate a new lock, and store it in lockvar, a void*. Sets lockvar to
79c43e99fdSEd Maste NULL if locking is not enabled. */
80c43e99fdSEd Maste #define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
81c43e99fdSEd Maste ((lockvar) = evthread_lock_fns_.alloc ? \
82c43e99fdSEd Maste evthread_lock_fns_.alloc(locktype) : NULL)
83c43e99fdSEd Maste
84c43e99fdSEd Maste /** Free a given lock, if it is present and locking is enabled. */
85c43e99fdSEd Maste #define EVTHREAD_FREE_LOCK(lockvar, locktype) \
86c43e99fdSEd Maste do { \
87c43e99fdSEd Maste void *lock_tmp_ = (lockvar); \
88c43e99fdSEd Maste if (lock_tmp_ && evthread_lock_fns_.free) \
89c43e99fdSEd Maste evthread_lock_fns_.free(lock_tmp_, (locktype)); \
90c43e99fdSEd Maste } while (0)
91c43e99fdSEd Maste
92c43e99fdSEd Maste /** Acquire a lock. */
93c43e99fdSEd Maste #define EVLOCK_LOCK(lockvar,mode) \
94c43e99fdSEd Maste do { \
95c43e99fdSEd Maste if (lockvar) \
96c43e99fdSEd Maste evthread_lock_fns_.lock(mode, lockvar); \
97c43e99fdSEd Maste } while (0)
98c43e99fdSEd Maste
99c43e99fdSEd Maste /** Release a lock */
100c43e99fdSEd Maste #define EVLOCK_UNLOCK(lockvar,mode) \
101c43e99fdSEd Maste do { \
102c43e99fdSEd Maste if (lockvar) \
103c43e99fdSEd Maste evthread_lock_fns_.unlock(mode, lockvar); \
104c43e99fdSEd Maste } while (0)
105c43e99fdSEd Maste
106c43e99fdSEd Maste /** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
107c43e99fdSEd Maste #define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
108c43e99fdSEd Maste do { \
109c43e99fdSEd Maste if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
110c43e99fdSEd Maste void *tmp = lockvar1; \
111c43e99fdSEd Maste lockvar1 = lockvar2; \
112c43e99fdSEd Maste lockvar2 = tmp; \
113c43e99fdSEd Maste } \
114c43e99fdSEd Maste } while (0)
115c43e99fdSEd Maste
116c43e99fdSEd Maste /** Lock an event_base, if it is set up for locking. Acquires the lock
117c43e99fdSEd Maste in the base structure whose field is named 'lockvar'. */
118c43e99fdSEd Maste #define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
119c43e99fdSEd Maste EVLOCK_LOCK((base)->lockvar, 0); \
120c43e99fdSEd Maste } while (0)
121c43e99fdSEd Maste
122c43e99fdSEd Maste /** Unlock an event_base, if it is set up for locking. */
123c43e99fdSEd Maste #define EVBASE_RELEASE_LOCK(base, lockvar) do { \
124c43e99fdSEd Maste EVLOCK_UNLOCK((base)->lockvar, 0); \
125c43e99fdSEd Maste } while (0)
126c43e99fdSEd Maste
127c43e99fdSEd Maste /** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
128c43e99fdSEd Maste * locked and held by us. */
129c43e99fdSEd Maste #define EVLOCK_ASSERT_LOCKED(lock) \
130c43e99fdSEd Maste do { \
131c43e99fdSEd Maste if ((lock) && evthread_lock_debugging_enabled_) { \
132c43e99fdSEd Maste EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
133c43e99fdSEd Maste } \
134c43e99fdSEd Maste } while (0)
135c43e99fdSEd Maste
136c43e99fdSEd Maste /** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
137c43e99fdSEd Maste * manage to get it. */
138c43e99fdSEd Maste static inline int EVLOCK_TRY_LOCK_(void *lock);
139c43e99fdSEd Maste static inline int
EVLOCK_TRY_LOCK_(void * lock)140c43e99fdSEd Maste EVLOCK_TRY_LOCK_(void *lock)
141c43e99fdSEd Maste {
142c43e99fdSEd Maste if (lock && evthread_lock_fns_.lock) {
143c43e99fdSEd Maste int r = evthread_lock_fns_.lock(EVTHREAD_TRY, lock);
144c43e99fdSEd Maste return !r;
145c43e99fdSEd Maste } else {
146c43e99fdSEd Maste /* Locking is disabled either globally or for this thing;
147c43e99fdSEd Maste * of course we count as having the lock. */
148c43e99fdSEd Maste return 1;
149c43e99fdSEd Maste }
150c43e99fdSEd Maste }
151c43e99fdSEd Maste
152c43e99fdSEd Maste /** Allocate a new condition variable and store it in the void *, condvar */
153c43e99fdSEd Maste #define EVTHREAD_ALLOC_COND(condvar) \
154c43e99fdSEd Maste do { \
155c43e99fdSEd Maste (condvar) = evthread_cond_fns_.alloc_condition ? \
156c43e99fdSEd Maste evthread_cond_fns_.alloc_condition(0) : NULL; \
157c43e99fdSEd Maste } while (0)
158c43e99fdSEd Maste /** Deallocate and free a condition variable in condvar */
159c43e99fdSEd Maste #define EVTHREAD_FREE_COND(cond) \
160c43e99fdSEd Maste do { \
161c43e99fdSEd Maste if (cond) \
162c43e99fdSEd Maste evthread_cond_fns_.free_condition((cond)); \
163c43e99fdSEd Maste } while (0)
164c43e99fdSEd Maste /** Signal one thread waiting on cond */
165c43e99fdSEd Maste #define EVTHREAD_COND_SIGNAL(cond) \
166c43e99fdSEd Maste ( (cond) ? evthread_cond_fns_.signal_condition((cond), 0) : 0 )
167c43e99fdSEd Maste /** Signal all threads waiting on cond */
168c43e99fdSEd Maste #define EVTHREAD_COND_BROADCAST(cond) \
169c43e99fdSEd Maste ( (cond) ? evthread_cond_fns_.signal_condition((cond), 1) : 0 )
170c43e99fdSEd Maste /** Wait until the condition 'cond' is signalled. Must be called while
171c43e99fdSEd Maste * holding 'lock'. The lock will be released until the condition is
172c43e99fdSEd Maste * signalled, at which point it will be acquired again. Returns 0 for
173c43e99fdSEd Maste * success, -1 for failure. */
174c43e99fdSEd Maste #define EVTHREAD_COND_WAIT(cond, lock) \
175c43e99fdSEd Maste ( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), NULL) : 0 )
176c43e99fdSEd Maste /** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
177c43e99fdSEd Maste * on timeout. */
178c43e99fdSEd Maste #define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
179c43e99fdSEd Maste ( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), (tv)) : 0 )
180c43e99fdSEd Maste
181c43e99fdSEd Maste /** True iff locking functions have been configured. */
182c43e99fdSEd Maste #define EVTHREAD_LOCKING_ENABLED() \
183c43e99fdSEd Maste (evthread_lock_fns_.lock != NULL)
184c43e99fdSEd Maste
185c43e99fdSEd Maste #elif ! defined(EVENT__DISABLE_THREAD_SUPPORT)
186c43e99fdSEd Maste
187c43e99fdSEd Maste unsigned long evthreadimpl_get_id_(void);
188*b50261e2SCy Schubert EVENT2_EXPORT_SYMBOL
189c43e99fdSEd Maste int evthreadimpl_is_lock_debugging_enabled_(void);
190*b50261e2SCy Schubert EVENT2_EXPORT_SYMBOL
191c43e99fdSEd Maste void *evthreadimpl_lock_alloc_(unsigned locktype);
192*b50261e2SCy Schubert EVENT2_EXPORT_SYMBOL
193c43e99fdSEd Maste void evthreadimpl_lock_free_(void *lock, unsigned locktype);
194*b50261e2SCy Schubert EVENT2_EXPORT_SYMBOL
195c43e99fdSEd Maste int evthreadimpl_lock_lock_(unsigned mode, void *lock);
196*b50261e2SCy Schubert EVENT2_EXPORT_SYMBOL
197c43e99fdSEd Maste int evthreadimpl_lock_unlock_(unsigned mode, void *lock);
198*b50261e2SCy Schubert EVENT2_EXPORT_SYMBOL
199c43e99fdSEd Maste void *evthreadimpl_cond_alloc_(unsigned condtype);
200*b50261e2SCy Schubert EVENT2_EXPORT_SYMBOL
201c43e99fdSEd Maste void evthreadimpl_cond_free_(void *cond);
202*b50261e2SCy Schubert EVENT2_EXPORT_SYMBOL
203c43e99fdSEd Maste int evthreadimpl_cond_signal_(void *cond, int broadcast);
204*b50261e2SCy Schubert EVENT2_EXPORT_SYMBOL
205c43e99fdSEd Maste int evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv);
206c43e99fdSEd Maste int evthreadimpl_locking_enabled_(void);
207c43e99fdSEd Maste
208c43e99fdSEd Maste #define EVTHREAD_GET_ID() evthreadimpl_get_id_()
209c43e99fdSEd Maste #define EVBASE_IN_THREAD(base) \
210c43e99fdSEd Maste ((base)->th_owner_id == evthreadimpl_get_id_())
211c43e99fdSEd Maste #define EVBASE_NEED_NOTIFY(base) \
212c43e99fdSEd Maste ((base)->running_loop && \
213c43e99fdSEd Maste ((base)->th_owner_id != evthreadimpl_get_id_()))
214c43e99fdSEd Maste
215c43e99fdSEd Maste #define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
216c43e99fdSEd Maste ((lockvar) = evthreadimpl_lock_alloc_(locktype))
217c43e99fdSEd Maste
218c43e99fdSEd Maste #define EVTHREAD_FREE_LOCK(lockvar, locktype) \
219c43e99fdSEd Maste do { \
220c43e99fdSEd Maste void *lock_tmp_ = (lockvar); \
221c43e99fdSEd Maste if (lock_tmp_) \
222c43e99fdSEd Maste evthreadimpl_lock_free_(lock_tmp_, (locktype)); \
223c43e99fdSEd Maste } while (0)
224c43e99fdSEd Maste
225c43e99fdSEd Maste /** Acquire a lock. */
226c43e99fdSEd Maste #define EVLOCK_LOCK(lockvar,mode) \
227c43e99fdSEd Maste do { \
228c43e99fdSEd Maste if (lockvar) \
229c43e99fdSEd Maste evthreadimpl_lock_lock_(mode, lockvar); \
230c43e99fdSEd Maste } while (0)
231c43e99fdSEd Maste
232c43e99fdSEd Maste /** Release a lock */
233c43e99fdSEd Maste #define EVLOCK_UNLOCK(lockvar,mode) \
234c43e99fdSEd Maste do { \
235c43e99fdSEd Maste if (lockvar) \
236c43e99fdSEd Maste evthreadimpl_lock_unlock_(mode, lockvar); \
237c43e99fdSEd Maste } while (0)
238c43e99fdSEd Maste
239c43e99fdSEd Maste /** Lock an event_base, if it is set up for locking. Acquires the lock
240c43e99fdSEd Maste in the base structure whose field is named 'lockvar'. */
241c43e99fdSEd Maste #define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
242c43e99fdSEd Maste EVLOCK_LOCK((base)->lockvar, 0); \
243c43e99fdSEd Maste } while (0)
244c43e99fdSEd Maste
245c43e99fdSEd Maste /** Unlock an event_base, if it is set up for locking. */
246c43e99fdSEd Maste #define EVBASE_RELEASE_LOCK(base, lockvar) do { \
247c43e99fdSEd Maste EVLOCK_UNLOCK((base)->lockvar, 0); \
248c43e99fdSEd Maste } while (0)
249c43e99fdSEd Maste
250c43e99fdSEd Maste /** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
251c43e99fdSEd Maste * locked and held by us. */
252c43e99fdSEd Maste #define EVLOCK_ASSERT_LOCKED(lock) \
253c43e99fdSEd Maste do { \
254c43e99fdSEd Maste if ((lock) && evthreadimpl_is_lock_debugging_enabled_()) { \
255c43e99fdSEd Maste EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
256c43e99fdSEd Maste } \
257c43e99fdSEd Maste } while (0)
258c43e99fdSEd Maste
259c43e99fdSEd Maste /** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
260c43e99fdSEd Maste * manage to get it. */
261c43e99fdSEd Maste static inline int EVLOCK_TRY_LOCK_(void *lock);
262c43e99fdSEd Maste static inline int
EVLOCK_TRY_LOCK_(void * lock)263c43e99fdSEd Maste EVLOCK_TRY_LOCK_(void *lock)
264c43e99fdSEd Maste {
265c43e99fdSEd Maste if (lock) {
266c43e99fdSEd Maste int r = evthreadimpl_lock_lock_(EVTHREAD_TRY, lock);
267c43e99fdSEd Maste return !r;
268c43e99fdSEd Maste } else {
269c43e99fdSEd Maste /* Locking is disabled either globally or for this thing;
270c43e99fdSEd Maste * of course we count as having the lock. */
271c43e99fdSEd Maste return 1;
272c43e99fdSEd Maste }
273c43e99fdSEd Maste }
274c43e99fdSEd Maste
275c43e99fdSEd Maste /** Allocate a new condition variable and store it in the void *, condvar */
276c43e99fdSEd Maste #define EVTHREAD_ALLOC_COND(condvar) \
277c43e99fdSEd Maste do { \
278c43e99fdSEd Maste (condvar) = evthreadimpl_cond_alloc_(0); \
279c43e99fdSEd Maste } while (0)
280c43e99fdSEd Maste /** Deallocate and free a condition variable in condvar */
281c43e99fdSEd Maste #define EVTHREAD_FREE_COND(cond) \
282c43e99fdSEd Maste do { \
283c43e99fdSEd Maste if (cond) \
284c43e99fdSEd Maste evthreadimpl_cond_free_((cond)); \
285c43e99fdSEd Maste } while (0)
286c43e99fdSEd Maste /** Signal one thread waiting on cond */
287c43e99fdSEd Maste #define EVTHREAD_COND_SIGNAL(cond) \
288c43e99fdSEd Maste ( (cond) ? evthreadimpl_cond_signal_((cond), 0) : 0 )
289c43e99fdSEd Maste /** Signal all threads waiting on cond */
290c43e99fdSEd Maste #define EVTHREAD_COND_BROADCAST(cond) \
291c43e99fdSEd Maste ( (cond) ? evthreadimpl_cond_signal_((cond), 1) : 0 )
292c43e99fdSEd Maste /** Wait until the condition 'cond' is signalled. Must be called while
293c43e99fdSEd Maste * holding 'lock'. The lock will be released until the condition is
294c43e99fdSEd Maste * signalled, at which point it will be acquired again. Returns 0 for
295c43e99fdSEd Maste * success, -1 for failure. */
296c43e99fdSEd Maste #define EVTHREAD_COND_WAIT(cond, lock) \
297c43e99fdSEd Maste ( (cond) ? evthreadimpl_cond_wait_((cond), (lock), NULL) : 0 )
298c43e99fdSEd Maste /** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
299c43e99fdSEd Maste * on timeout. */
300c43e99fdSEd Maste #define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
301c43e99fdSEd Maste ( (cond) ? evthreadimpl_cond_wait_((cond), (lock), (tv)) : 0 )
302c43e99fdSEd Maste
303c43e99fdSEd Maste #define EVTHREAD_LOCKING_ENABLED() \
304c43e99fdSEd Maste (evthreadimpl_locking_enabled_())
305c43e99fdSEd Maste
306c43e99fdSEd Maste #else /* EVENT__DISABLE_THREAD_SUPPORT */
307c43e99fdSEd Maste
308c43e99fdSEd Maste #define EVTHREAD_GET_ID() 1
309c43e99fdSEd Maste #define EVTHREAD_ALLOC_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_
310c43e99fdSEd Maste #define EVTHREAD_FREE_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_
311c43e99fdSEd Maste
312c43e99fdSEd Maste #define EVLOCK_LOCK(lockvar, mode) EVUTIL_NIL_STMT_
313c43e99fdSEd Maste #define EVLOCK_UNLOCK(lockvar, mode) EVUTIL_NIL_STMT_
314c43e99fdSEd Maste #define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_
315c43e99fdSEd Maste #define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_
316c43e99fdSEd Maste
317c43e99fdSEd Maste #define EVBASE_IN_THREAD(base) 1
318c43e99fdSEd Maste #define EVBASE_NEED_NOTIFY(base) 0
319c43e99fdSEd Maste #define EVBASE_ACQUIRE_LOCK(base, lock) EVUTIL_NIL_STMT_
320c43e99fdSEd Maste #define EVBASE_RELEASE_LOCK(base, lock) EVUTIL_NIL_STMT_
321c43e99fdSEd Maste #define EVLOCK_ASSERT_LOCKED(lock) EVUTIL_NIL_STMT_
322c43e99fdSEd Maste
323c43e99fdSEd Maste #define EVLOCK_TRY_LOCK_(lock) 1
324c43e99fdSEd Maste
325c43e99fdSEd Maste #define EVTHREAD_ALLOC_COND(condvar) EVUTIL_NIL_STMT_
326c43e99fdSEd Maste #define EVTHREAD_FREE_COND(cond) EVUTIL_NIL_STMT_
327c43e99fdSEd Maste #define EVTHREAD_COND_SIGNAL(cond) EVUTIL_NIL_STMT_
328c43e99fdSEd Maste #define EVTHREAD_COND_BROADCAST(cond) EVUTIL_NIL_STMT_
329c43e99fdSEd Maste #define EVTHREAD_COND_WAIT(cond, lock) EVUTIL_NIL_STMT_
330c43e99fdSEd Maste #define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) EVUTIL_NIL_STMT_
331c43e99fdSEd Maste
332c43e99fdSEd Maste #define EVTHREAD_LOCKING_ENABLED() 0
333c43e99fdSEd Maste
334c43e99fdSEd Maste #endif
335c43e99fdSEd Maste
336c43e99fdSEd Maste /* This code is shared between both lock impls */
337c43e99fdSEd Maste #if ! defined(EVENT__DISABLE_THREAD_SUPPORT)
338c43e99fdSEd Maste /** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
339c43e99fdSEd Maste #define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
340c43e99fdSEd Maste do { \
341c43e99fdSEd Maste if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
342c43e99fdSEd Maste void *tmp = lockvar1; \
343c43e99fdSEd Maste lockvar1 = lockvar2; \
344c43e99fdSEd Maste lockvar2 = tmp; \
345c43e99fdSEd Maste } \
346c43e99fdSEd Maste } while (0)
347c43e99fdSEd Maste
348c43e99fdSEd Maste /** Acquire both lock1 and lock2. Always allocates locks in the same order,
349c43e99fdSEd Maste * so that two threads locking two locks with LOCK2 will not deadlock. */
350c43e99fdSEd Maste #define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) \
351c43e99fdSEd Maste do { \
352c43e99fdSEd Maste void *lock1_tmplock_ = (lock1); \
353c43e99fdSEd Maste void *lock2_tmplock_ = (lock2); \
354c43e99fdSEd Maste EVLOCK_SORTLOCKS_(lock1_tmplock_,lock2_tmplock_); \
355c43e99fdSEd Maste EVLOCK_LOCK(lock1_tmplock_,mode1); \
356c43e99fdSEd Maste if (lock2_tmplock_ != lock1_tmplock_) \
357c43e99fdSEd Maste EVLOCK_LOCK(lock2_tmplock_,mode2); \
358c43e99fdSEd Maste } while (0)
359c43e99fdSEd Maste /** Release both lock1 and lock2. */
360c43e99fdSEd Maste #define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) \
361c43e99fdSEd Maste do { \
362c43e99fdSEd Maste void *lock1_tmplock_ = (lock1); \
363c43e99fdSEd Maste void *lock2_tmplock_ = (lock2); \
364c43e99fdSEd Maste EVLOCK_SORTLOCKS_(lock1_tmplock_,lock2_tmplock_); \
365c43e99fdSEd Maste if (lock2_tmplock_ != lock1_tmplock_) \
366c43e99fdSEd Maste EVLOCK_UNLOCK(lock2_tmplock_,mode2); \
367c43e99fdSEd Maste EVLOCK_UNLOCK(lock1_tmplock_,mode1); \
368c43e99fdSEd Maste } while (0)
369c43e99fdSEd Maste
370*b50261e2SCy Schubert EVENT2_EXPORT_SYMBOL
371c43e99fdSEd Maste int evthread_is_debug_lock_held_(void *lock);
372c43e99fdSEd Maste void *evthread_debug_get_real_lock_(void *lock);
373c43e99fdSEd Maste
374c43e99fdSEd Maste void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
375c43e99fdSEd Maste int enable_locks);
376c43e99fdSEd Maste
/** Set up 'lockvar' as a global lock of kind 'locktype', honoring the
 * (captured) local variable 'enable_locks'.  On allocation failure, warns
 * and makes the enclosing function return -1.
 *
 * NOTE: the original definition ended in "} while (0);" — the trailing
 * semicolon made each invocation expand to two statements, defeating the
 * do/while(0) idiom and breaking brace-less "if (x) MACRO(...); else"
 * usage.  The semicolon is supplied by the caller, as with every other
 * statement-like macro in this header. */
#define EVTHREAD_SETUP_GLOBAL_LOCK(lockvar, locktype)			\
	do {								\
		lockvar = evthread_setup_global_lock_(lockvar,		\
		    (locktype), enable_locks);				\
		if (!lockvar) {						\
			event_warn("Couldn't allocate %s", #lockvar);	\
			return -1;					\
		}							\
	} while (0)
386c43e99fdSEd Maste
387c43e99fdSEd Maste int event_global_setup_locks_(const int enable_locks);
388c43e99fdSEd Maste int evsig_global_setup_locks_(const int enable_locks);
389c43e99fdSEd Maste int evutil_global_setup_locks_(const int enable_locks);
390c43e99fdSEd Maste int evutil_secure_rng_global_setup_locks_(const int enable_locks);
391c43e99fdSEd Maste
392c43e99fdSEd Maste /** Return current evthread_lock_callbacks */
393*b50261e2SCy Schubert EVENT2_EXPORT_SYMBOL
394c43e99fdSEd Maste struct evthread_lock_callbacks *evthread_get_lock_callbacks(void);
395c43e99fdSEd Maste /** Return current evthread_condition_callbacks */
396c43e99fdSEd Maste struct evthread_condition_callbacks *evthread_get_condition_callbacks(void);
397c43e99fdSEd Maste /** Disable locking for internal usage (like global shutdown) */
398c43e99fdSEd Maste void evthreadimpl_disable_lock_debugging_(void);
399c43e99fdSEd Maste
400c43e99fdSEd Maste #endif
401c43e99fdSEd Maste
402c43e99fdSEd Maste #ifdef __cplusplus
403c43e99fdSEd Maste }
404c43e99fdSEd Maste #endif
405c43e99fdSEd Maste
406c43e99fdSEd Maste #endif /* EVTHREAD_INTERNAL_H_INCLUDED_ */
407