xref: /freebsd/crypto/openssl/crypto/threads_pthread.c (revision e7be843b4a162e68651d3911f0357ed464915629)
1 /*
2  * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
3  *
4  * Licensed under the Apache License 2.0 (the "License").  You may not use
5  * this file except in compliance with the License.  You can obtain a copy
6  * in the file LICENSE in the source distribution or at
7  * https://www.openssl.org/source/license.html
8  */
9 
10 /* We need to use the OPENSSL_fork_*() deprecated APIs */
11 #define OPENSSL_SUPPRESS_DEPRECATED
12 
13 #include <openssl/crypto.h>
14 #include <crypto/cryptlib.h>
15 #include "internal/cryptlib.h"
16 #include "internal/rcu.h"
17 #include "rcu_internal.h"
18 
19 #if defined(__clang__) && defined(__has_feature)
20 # if __has_feature(thread_sanitizer)
21 #  define __SANITIZE_THREAD__
22 # endif
23 #endif
24 
25 #if defined(__SANITIZE_THREAD__)
26 # include <sanitizer/tsan_interface.h>
27 # define TSAN_FAKE_UNLOCK(x)   __tsan_mutex_pre_unlock((x), 0); \
28 __tsan_mutex_post_unlock((x), 0)
29 
30 # define TSAN_FAKE_LOCK(x)  __tsan_mutex_pre_lock((x), 0); \
31 __tsan_mutex_post_lock((x), 0, 0)
32 #else
33 # define TSAN_FAKE_UNLOCK(x)
34 # define TSAN_FAKE_LOCK(x)
35 #endif
36 
37 #if defined(__sun)
38 # include <atomic.h>
39 #endif
40 
41 #if defined(__apple_build_version__) && __apple_build_version__ < 6000000
42 /*
43  * OS X 10.7 and 10.8 shipped a version of clang which has __ATOMIC_ACQUIRE and
44  * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free(),
45  * i.e. its signature is __atomic_is_lock_free(sizeof(_Atomic(T))) rather than the usual two.
46  * All of this makes it impossible to use __atomic_is_lock_free here.
47  *
48  * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
49  */
50 # define BROKEN_CLANG_ATOMICS
51 #endif
52 
53 #if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)
54 
55 # if defined(OPENSSL_SYS_UNIX)
56 #  include <sys/types.h>
57 #  include <unistd.h>
58 # endif
59 
60 # include <assert.h>
61 
62 /*
63  * The Non-Stop KLT thread model currently seems broken in its rwlock
64  * implementation
65  */
66 # if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_)
67 #  define USE_RWLOCK
68 # endif
69 
70 /*
71  * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
72  * other compilers.
73  *
74  * Unfortunately, we can't do that with some "generic type", because there's no
75  * guarantee that the chosen generic type is large enough to cover all cases.
76  * Therefore, we implement fallbacks for each applicable type, with composed
77  * names that include the type they handle.
78  *
79  * (an anecdote: we previously tried to use |void *| as the generic type, with
80  * the thought that the pointer itself is the largest type.  However, this is
81  * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
82  *
83  * All applicable ATOMIC_ macros take the intended type as first parameter, so
84  * they can map to the correct fallback function.  In the GNU/clang case, that
85  * parameter is simply ignored.
86  */
87 
88 /*
89  * Internal types used with the ATOMIC_ macros, to make it possible to compose
90  * fallback function names.
91  */
92 typedef void *pvoid;
93 
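/*
 * A minimal sketch (not part of the build) of how the ATOMIC_ macros are meant
 * to be used: the leading type parameter lets the fallback definitions below
 * paste a per-type function name, while the GNU/clang builtins simply ignore
 * it.  example_counter and example_read_counter are hypothetical.
 */
# if 0
static uint64_t example_counter;

static uint64_t example_read_counter(void)
{
    /*
     * With GNU/clang atomics this expands to
     *   __atomic_load_n(&example_counter, __ATOMIC_ACQUIRE)
     * and with the fallbacks to
     *   fallback_atomic_load_n_uint64_t(&example_counter)
     */
    return ATOMIC_LOAD_N(uint64_t, &example_counter, __ATOMIC_ACQUIRE);
}
# endif
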
94 # if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
95     && !defined(USE_ATOMIC_FALLBACKS)
96 #  define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
97 #  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
98 #  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
99 #  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
100 #  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
101 # else
102 static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
103 
104 #  define IMPL_fallback_atomic_load_n(t)                        \
105     static ossl_inline t fallback_atomic_load_n_##t(t *p)            \
106     {                                                           \
107         t ret;                                                  \
108                                                                 \
109         pthread_mutex_lock(&atomic_sim_lock);                   \
110         ret = *p;                                               \
111         pthread_mutex_unlock(&atomic_sim_lock);                 \
112         return ret;                                             \
113     }
114 IMPL_fallback_atomic_load_n(uint32_t)
115 IMPL_fallback_atomic_load_n(uint64_t)
116 IMPL_fallback_atomic_load_n(pvoid)
117 
118 #  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
119 
120 #  define IMPL_fallback_atomic_store_n(t)                       \
121     static ossl_inline t fallback_atomic_store_n_##t(t *p, t v)      \
122     {                                                           \
123         t ret;                                                  \
124                                                                 \
125         pthread_mutex_lock(&atomic_sim_lock);                   \
126         ret = *p;                                               \
127         *p = v;                                                 \
128         pthread_mutex_unlock(&atomic_sim_lock);                 \
129         return ret;                                             \
130     }
131 IMPL_fallback_atomic_store_n(uint32_t)
132 
133 #  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
134 
135 #  define IMPL_fallback_atomic_store(t)                         \
136     static ossl_inline void fallback_atomic_store_##t(t *p, t *v)    \
137     {                                                           \
138         pthread_mutex_lock(&atomic_sim_lock);                   \
139         *p = *v;                                                \
140         pthread_mutex_unlock(&atomic_sim_lock);                 \
141     }
142 IMPL_fallback_atomic_store(pvoid)
143 
144 #  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
145 
146 /*
147  * The fallbacks that follow don't need any per type implementation, as
148  * they are designed for uint64_t only.  If there comes a time when multiple
149  * types need to be covered, it's relatively easy to refactor them the same
150  * way as the fallbacks above.
151  */
152 
153 static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
154 {
155     uint64_t ret;
156 
157     pthread_mutex_lock(&atomic_sim_lock);
158     *p += v;
159     ret = *p;
160     pthread_mutex_unlock(&atomic_sim_lock);
161     return ret;
162 }
163 
164 #  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
165 
166 static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
167 {
168     uint64_t ret;
169 
170     pthread_mutex_lock(&atomic_sim_lock);
171     *p -= v;
172     ret = *p;
173     pthread_mutex_unlock(&atomic_sim_lock);
174     return ret;
175 }
176 
177 #  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
178 # endif
179 
180 /*
181  * This is the core of an rcu lock. It tracks the readers and writers for the
182  * current quiescence point for a given lock. Users is the 64 bit value that
183  * stores the READERS/ID as defined above
184  *
185  */
186 struct rcu_qp {
187     uint64_t users;
188 };
189 
190 struct thread_qp {
191     struct rcu_qp *qp;
192     unsigned int depth;
193     CRYPTO_RCU_LOCK *lock;
194 };
195 
196 # define MAX_QPS 10
197 /*
198  * This is the per thread tracking data
199  * that is assigned to each thread participating
200  * in an rcu qp
201  *
202  * qp points to the qp that it last acquired
203  *
204  */
205 struct rcu_thr_data {
206     struct thread_qp thread_qps[MAX_QPS];
207 };
208 
209 /*
210  * This is the internal version of a CRYPTO_RCU_LOCK
211  * it is cast from CRYPTO_RCU_LOCK
212  */
213 struct rcu_lock_st {
214     /* Callbacks to call for next ossl_synchronize_rcu */
215     struct rcu_cb_item *cb_items;
216 
217     /* The context we are being created against */
218     OSSL_LIB_CTX *ctx;
219 
220     /* Array of quiescent points for synchronization */
221     struct rcu_qp *qp_group;
222 
223     /* rcu generation counter for in-order retirement */
224     uint32_t id_ctr;
225 
226     /* Number of elements in qp_group array */
227     uint32_t group_count;
228 
229     /* Index of the current qp in the qp_group array */
230     uint32_t reader_idx;
231 
232     /* value of the next id_ctr value to be retired */
233     uint32_t next_to_retire;
234 
235     /* index of the next free rcu_qp in the qp_group */
236     uint32_t current_alloc_idx;
237 
238     /* number of qp's in qp_group array currently being retired */
239     uint32_t writers_alloced;
240 
241     /* lock protecting write side operations */
242     pthread_mutex_t write_lock;
243 
244     /* lock protecting updates to writers_alloced/current_alloc_idx */
245     pthread_mutex_t alloc_lock;
246 
247     /* signal to wake threads waiting on alloc_lock */
248     pthread_cond_t alloc_signal;
249 
250     /* lock to enforce in-order retirement */
251     pthread_mutex_t prior_lock;
252 
253     /* signal to wake threads waiting on prior_lock */
254     pthread_cond_t prior_signal;
255 };
256 
257 /* Read side acquisition of the current qp */
258 static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
259 {
260     uint32_t qp_idx;
261 
262     /* get the current qp index */
263     for (;;) {
264         qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);
265 
266         /*
267          * Notes on use of __ATOMIC_ACQUIRE
268          * We need to ensure the following:
269          * 1) That subsequent operations aren't optimized by hoisting them above
270          * this operation.  Specifically, we don't want the below re-load of
271          * qp_idx to get optimized away
272          * 2) We want to ensure that any updating of reader_idx on the write side
273          * of the lock is flushed from a local cpu cache so that we see any
274          * updates prior to the load.  This is a non-issue on cache coherent
275          * systems like x86, but is relevant on other arches
276          */
277         ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
278                          __ATOMIC_ACQUIRE);
279 
280         /* if the idx hasn't changed, we're good, else try again */
281         if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,
282                                     __ATOMIC_RELAXED))
283             break;
284 
285         ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
286                          __ATOMIC_RELAXED);
287     }
288 
289     return &lock->qp_group[qp_idx];
290 }
291 
292 static void ossl_rcu_free_local_data(void *arg)
293 {
294     OSSL_LIB_CTX *ctx = arg;
295     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
296     struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
297 
298     OPENSSL_free(data);
299     CRYPTO_THREAD_set_local(lkey, NULL);
300 }
301 
302 void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
303 {
304     struct rcu_thr_data *data;
305     int i, available_qp = -1;
306     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
307 
308     /*
309      * we're going to access the per-thread rcu data here, so
310      * fetch it (it is created below on first use)
311      */
312     data = CRYPTO_THREAD_get_local(lkey);
313 
314     if (data == NULL) {
315         data = OPENSSL_zalloc(sizeof(*data));
316         OPENSSL_assert(data != NULL);
317         CRYPTO_THREAD_set_local(lkey, data);
318         ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
319     }
320 
321     for (i = 0; i < MAX_QPS; i++) {
322         if (data->thread_qps[i].qp == NULL && available_qp == -1)
323             available_qp = i;
324         /* If we have a hold on this lock already, we're good */
325         if (data->thread_qps[i].lock == lock) {
326             data->thread_qps[i].depth++;
327             return;
328         }
329     }
330 
331     /*
332      * if we get here, then we don't have a hold on this lock yet
333      */
334     assert(available_qp != -1);
335 
336     data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
337     data->thread_qps[available_qp].depth = 1;
338     data->thread_qps[available_qp].lock = lock;
339 }
340 
341 void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
342 {
343     int i;
344     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
345     struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
346     uint64_t ret;
347 
348     assert(data != NULL);
349 
350     for (i = 0; i < MAX_QPS; i++) {
351         if (data->thread_qps[i].lock == lock) {
352             /*
353              * we have to use __ATOMIC_RELEASE here
354              * to ensure that all preceding read instructions complete
355              * before the decrement is visible to ossl_synchronize_rcu
356              */
357             data->thread_qps[i].depth--;
358             if (data->thread_qps[i].depth == 0) {
359                 ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
360                                        (uint64_t)1, __ATOMIC_RELEASE);
361                 OPENSSL_assert(ret != UINT64_MAX);
362                 data->thread_qps[i].qp = NULL;
363                 data->thread_qps[i].lock = NULL;
364             }
365             return;
366         }
367     }
368     /*
369      * If we get here, we're trying to unlock a lock that we never acquired -
370      * that's fatal.
371      */
372     assert(0);
373 }
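
/*
 * A minimal sketch (not part of the build) of a read-side critical section,
 * assuming a hypothetical my_lock/my_shared_ptr pair.  Read locks nest on the
 * same thread; only the outermost unlock drops the hold on the qp, courtesy
 * of the depth counter above.
 */
# if 0
static CRYPTO_RCU_LOCK *my_lock;
static void *my_shared_ptr;

static void example_reader(void)
{
    void *obj;

    ossl_rcu_read_lock(my_lock);
    obj = ossl_rcu_uptr_deref(&my_shared_ptr);
    /* obj cannot be reclaimed by a writer while the read lock is held */
    ossl_rcu_read_unlock(my_lock);
}
# endif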
374 
375 /*
376  * Write side allocation routine to get the current qp
377  * and replace it with a new one
378  */
379 static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
380 {
381     uint32_t current_idx;
382 
383     pthread_mutex_lock(&lock->alloc_lock);
384 
385     /*
386      * we need at least one qp to be available with one
387      * left over, so that readers can start working on
388      * one that isn't yet being waited on
389      */
390     while (lock->group_count - lock->writers_alloced < 2)
391         /* we have to wait for one to be free */
392         pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);
393 
394     current_idx = lock->current_alloc_idx;
395 
396     /* Allocate the qp */
397     lock->writers_alloced++;
398 
399     /* increment the allocation index */
400     lock->current_alloc_idx =
401         (lock->current_alloc_idx + 1) % lock->group_count;
402 
403     *curr_id = lock->id_ctr;
404     lock->id_ctr++;
405 
406     ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
407                    __ATOMIC_RELAXED);
408 
409     /*
410      * this should make sure that the new value of reader_idx is visible in
411      * get_hold_current_qp, directly after incrementing the users count
412      */
413     ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
414                      __ATOMIC_RELEASE);
415 
416     /* wake up any waiters */
417     pthread_cond_signal(&lock->alloc_signal);
418     pthread_mutex_unlock(&lock->alloc_lock);
419     return &lock->qp_group[current_idx];
420 }
421 
422 static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
423 {
424     pthread_mutex_lock(&lock->alloc_lock);
425     lock->writers_alloced--;
426     pthread_cond_signal(&lock->alloc_signal);
427     pthread_mutex_unlock(&lock->alloc_lock);
428 }
429 
430 static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
431                                             uint32_t count)
432 {
433     struct rcu_qp *new =
434         OPENSSL_zalloc(sizeof(*new) * count);
435 
436     lock->group_count = count;
437     return new;
438 }
439 
440 void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
441 {
442     pthread_mutex_lock(&lock->write_lock);
443     TSAN_FAKE_UNLOCK(&lock->write_lock);
444 }
445 
446 void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
447 {
448     TSAN_FAKE_LOCK(&lock->write_lock);
449     pthread_mutex_unlock(&lock->write_lock);
450 }
451 
452 void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
453 {
454     struct rcu_qp *qp;
455     uint64_t count;
456     uint32_t curr_id;
457     struct rcu_cb_item *cb_items, *tmpcb;
458 
459     pthread_mutex_lock(&lock->write_lock);
460     cb_items = lock->cb_items;
461     lock->cb_items = NULL;
462     pthread_mutex_unlock(&lock->write_lock);
463 
464     qp = update_qp(lock, &curr_id);
465 
466     /* retire in order */
467     pthread_mutex_lock(&lock->prior_lock);
468     while (lock->next_to_retire != curr_id)
469         pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
470 
471     /*
472      * wait for the reader count to reach zero.
473      * Note the use of __ATOMIC_ACQUIRE here to ensure that any
474      * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
475      * is visible prior to our read.
476      * However, this is likely only necessary to silence a tsan warning,
477      * because the read side should not do any write operation
478      * outside the atomic itself.
479      */
480     do {
481         count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
482     } while (count != (uint64_t)0);
483 
484     lock->next_to_retire++;
485     pthread_cond_broadcast(&lock->prior_signal);
486     pthread_mutex_unlock(&lock->prior_lock);
487 
488     retire_qp(lock, qp);
489 
490     /* handle any callbacks that we have */
491     while (cb_items != NULL) {
492         tmpcb = cb_items;
493         cb_items = cb_items->next;
494         tmpcb->fn(tmpcb->data);
495         OPENSSL_free(tmpcb);
496     }
497 }
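
/*
 * A minimal sketch (not part of the build) of the write side: publish a new
 * object, then synchronize so every reader that could still see the old one
 * has drained before it is freed.  my_lock, my_shared_ptr and example_publish
 * are hypothetical.
 */
# if 0
static CRYPTO_RCU_LOCK *my_lock;
static void *my_shared_ptr;

static void example_publish(void *new_obj)
{
    void *old_obj;

    ossl_rcu_write_lock(my_lock);
    old_obj = my_shared_ptr;            /* writers are serialized here */
    ossl_rcu_assign_uptr(&my_shared_ptr, &new_obj);
    ossl_rcu_write_unlock(my_lock);

    ossl_synchronize_rcu(my_lock);      /* wait out the readers of old_obj */
    OPENSSL_free(old_obj);
}
# endif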
498 
499 /*
500  * Note: This call assumes it's made under the protection of
501  * ossl_rcu_write_lock
502  */
503 int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
504 {
505     struct rcu_cb_item *new =
506         OPENSSL_zalloc(sizeof(*new));
507 
508     if (new == NULL)
509         return 0;
510 
511     new->data = data;
512     new->fn = cb;
513 
514     new->next = lock->cb_items;
515     lock->cb_items = new;
516 
517     return 1;
518 }
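
/*
 * A minimal sketch (not part of the build) of deferred reclamation: rather
 * than blocking in ossl_synchronize_rcu itself, a writer can queue a callback
 * with ossl_rcu_call while holding the write lock; the next synchronize on
 * this lock runs it after the grace period.  my_lock and the example_*
 * helpers are hypothetical.
 */
# if 0
static CRYPTO_RCU_LOCK *my_lock;

static void example_free_cb(void *data)
{
    OPENSSL_free(data);
}

static int example_retire(void *old_obj)
{
    int ok;

    ossl_rcu_write_lock(my_lock);
    /* on failure the caller still owns old_obj and must reclaim it itself */
    ok = ossl_rcu_call(my_lock, example_free_cb, old_obj);
    ossl_rcu_write_unlock(my_lock);
    return ok;
}
# endif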
519 
520 void *ossl_rcu_uptr_deref(void **p)
521 {
522     return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
523 }
524 
525 void ossl_rcu_assign_uptr(void **p, void **v)
526 {
527     ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
528 }
529 
530 CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
531 {
532     struct rcu_lock_st *new;
533 
534     /*
535      * We need a minimum of 2 qp's
536      */
537     if (num_writers < 2)
538         num_writers = 2;
539 
540     ctx = ossl_lib_ctx_get_concrete(ctx);
541     if (ctx == NULL)
542         return 0;
543 
544     new = OPENSSL_zalloc(sizeof(*new));
545     if (new == NULL)
546         return NULL;
547 
548     new->ctx = ctx;
549     pthread_mutex_init(&new->write_lock, NULL);
550     pthread_mutex_init(&new->prior_lock, NULL);
551     pthread_mutex_init(&new->alloc_lock, NULL);
552     pthread_cond_init(&new->prior_signal, NULL);
553     pthread_cond_init(&new->alloc_signal, NULL);
554 
555     new->qp_group = allocate_new_qp_group(new, num_writers);
556     if (new->qp_group == NULL) {
557         OPENSSL_free(new);
558         new = NULL;
559     }
560 
561     return new;
562 }
563 
564 void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
565 {
566     struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;
567 
568     if (lock == NULL)
569         return;
570 
571     /* make sure we're synchronized */
572     ossl_synchronize_rcu(rlock);
573 
574     OPENSSL_free(rlock->qp_group);
575     /* There should only be a single qp left now */
576     OPENSSL_free(rlock);
577 }
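
/*
 * A minimal sketch (not part of the build) of lock lifetime management.
 * num_writers sizes qp_group (values below 2 are raised to 2, since one qp
 * must stay available to readers while another is being retired), a NULL ctx
 * selects the default library context, and ossl_rcu_lock_free performs a
 * final synchronize before releasing the group.  example_lifecycle is
 * hypothetical.
 */
# if 0
static int example_lifecycle(void)
{
    CRYPTO_RCU_LOCK *lock = ossl_rcu_lock_new(1, NULL);    /* 2 qps minimum */

    if (lock == NULL)
        return 0;
    /* ... readers and writers operate on the lock as sketched above ... */
    ossl_rcu_lock_free(lock);
    return 1;
}
# endif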
578 
579 CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
580 {
581 # ifdef USE_RWLOCK
582     CRYPTO_RWLOCK *lock;
583 
584     if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
585         /* Don't set error, to avoid recursion blowup. */
586         return NULL;
587 
588     if (pthread_rwlock_init(lock, NULL) != 0) {
589         OPENSSL_free(lock);
590         return NULL;
591     }
592 # else
593     pthread_mutexattr_t attr;
594     CRYPTO_RWLOCK *lock;
595 
596     if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
597         /* Don't set error, to avoid recursion blowup. */
598         return NULL;
599 
600     /*
601      * We don't use recursive mutexes, but try to catch errors if we do.
602      */
603     pthread_mutexattr_init(&attr);
604 #  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
605 #   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
606     pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
607 #   endif
608 #  else
609     /* The SPT Thread Library does not define MUTEX attributes. */
610 #  endif
611 
612     if (pthread_mutex_init(lock, &attr) != 0) {
613         pthread_mutexattr_destroy(&attr);
614         OPENSSL_free(lock);
615         return NULL;
616     }
617 
618     pthread_mutexattr_destroy(&attr);
619 # endif
620 
621     return lock;
622 }
623 
624 __owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
625 {
626 # ifdef USE_RWLOCK
627     if (!ossl_assert(pthread_rwlock_rdlock(lock) == 0))
628         return 0;
629 # else
630     if (pthread_mutex_lock(lock) != 0) {
631         assert(errno != EDEADLK && errno != EBUSY);
632         return 0;
633     }
634 # endif
635 
636     return 1;
637 }
638 
639 __owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
640 {
641 # ifdef USE_RWLOCK
642     if (!ossl_assert(pthread_rwlock_wrlock(lock) == 0))
643         return 0;
644 # else
645     if (pthread_mutex_lock(lock) != 0) {
646         assert(errno != EDEADLK && errno != EBUSY);
647         return 0;
648     }
649 # endif
650 
651     return 1;
652 }
653 
654 int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
655 {
656 # ifdef USE_RWLOCK
657     if (pthread_rwlock_unlock(lock) != 0)
658         return 0;
659 # else
660     if (pthread_mutex_unlock(lock) != 0) {
661         assert(errno != EPERM);
662         return 0;
663     }
664 # endif
665 
666     return 1;
667 }
668 
669 void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
670 {
671     if (lock == NULL)
672         return;
673 
674 # ifdef USE_RWLOCK
675     pthread_rwlock_destroy(lock);
676 # else
677     pthread_mutex_destroy(lock);
678 # endif
679     OPENSSL_free(lock);
680 
681     return;
682 }
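
/*
 * A minimal sketch (not part of the build) of the CRYPTO_THREAD lock API
 * defined above.  Depending on USE_RWLOCK the lock is a pthread rwlock or a
 * plain (non-recursive) mutex, so a thread must never re-acquire a lock it
 * already holds.  my_table_lock, my_table_size and the example_* helpers are
 * hypothetical.
 */
# if 0
static CRYPTO_RWLOCK *my_table_lock;
static int my_table_size;

static int example_grow_table(void)
{
    if (!CRYPTO_THREAD_write_lock(my_table_lock))
        return 0;
    my_table_size++;
    return CRYPTO_THREAD_unlock(my_table_lock);
}

static int example_read_table_size(int *out)
{
    if (!CRYPTO_THREAD_read_lock(my_table_lock))
        return 0;
    *out = my_table_size;
    return CRYPTO_THREAD_unlock(my_table_lock);
}
# endif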
683 
684 int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
685 {
686     if (pthread_once(once, init) != 0)
687         return 0;
688 
689     return 1;
690 }
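
/*
 * A minimal sketch (not part of the build) of one-time initialisation via
 * CRYPTO_THREAD_run_once, which maps onto pthread_once here.  example_once,
 * example_lock and the helpers are hypothetical.
 */
# if 0
static CRYPTO_ONCE example_once = CRYPTO_ONCE_STATIC_INIT;
static CRYPTO_RWLOCK *example_lock;

static void example_init(void)
{
    example_lock = CRYPTO_THREAD_lock_new();
}

static CRYPTO_RWLOCK *example_get_lock(void)
{
    if (!CRYPTO_THREAD_run_once(&example_once, example_init))
        return NULL;
    return example_lock;
}
# endif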
691 
692 int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
693 {
694     if (pthread_key_create(key, cleanup) != 0)
695         return 0;
696 
697     return 1;
698 }
699 
700 void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
701 {
702     return pthread_getspecific(*key);
703 }
704 
705 int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
706 {
707     if (pthread_setspecific(*key, val) != 0)
708         return 0;
709 
710     return 1;
711 }
712 
713 int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
714 {
715     if (pthread_key_delete(*key) != 0)
716         return 0;
717 
718     return 1;
719 }
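
/*
 * A minimal sketch (not part of the build) of per-thread storage using the
 * CRYPTO_THREAD_*_local wrappers around pthread keys.  The cleanup callback
 * passed to CRYPTO_THREAD_init_local runs when a thread exits with a
 * non-NULL value installed.  example_key and the helpers are hypothetical.
 */
# if 0
static CRYPTO_THREAD_LOCAL example_key;

static void example_cleanup(void *p)
{
    OPENSSL_free(p);
}

static int example_key_init(void)
{
    return CRYPTO_THREAD_init_local(&example_key, example_cleanup);
}

static void *example_get_scratch(void)
{
    void *buf = CRYPTO_THREAD_get_local(&example_key);

    if (buf == NULL) {
        buf = OPENSSL_zalloc(64);
        if (buf != NULL && !CRYPTO_THREAD_set_local(&example_key, buf)) {
            OPENSSL_free(buf);
            buf = NULL;
        }
    }
    return buf;
}
# endif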
720 
721 CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
722 {
723     return pthread_self();
724 }
725 
726 int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
727 {
728     return pthread_equal(a, b);
729 }
730 
731 int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
732 {
733 # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
734     if (__atomic_is_lock_free(sizeof(*val), val)) {
735         *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
736         return 1;
737     }
738 # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
739     /* This will work for all future Solaris versions. */
740     if (ret != NULL) {
741         *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
742         return 1;
743     }
744 # endif
745     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
746         return 0;
747 
748     *val += amount;
749     *ret  = *val;
750 
751     if (!CRYPTO_THREAD_unlock(lock))
752         return 0;
753 
754     return 1;
755 }
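
/*
 * A minimal sketch (not part of the build) of a reference counter built on
 * CRYPTO_atomic_add.  The lock argument is only used when no native atomic
 * path above is available, but callers should still provide one so the
 * mutex-based fallback can work.  example_refs and example_lock are
 * hypothetical.
 */
# if 0
static int example_refs = 1;
static CRYPTO_RWLOCK *example_lock;

static int example_up_ref(void)
{
    int newcount;

    return CRYPTO_atomic_add(&example_refs, 1, &newcount, example_lock);
}

static int example_down_ref(int *is_zero)
{
    int newcount;

    if (!CRYPTO_atomic_add(&example_refs, -1, &newcount, example_lock))
        return 0;
    *is_zero = (newcount == 0);
    return 1;
}
# endif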
756 
757 int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
758                         CRYPTO_RWLOCK *lock)
759 {
760 # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
761     if (__atomic_is_lock_free(sizeof(*val), val)) {
762         *ret = __atomic_add_fetch(val, op, __ATOMIC_ACQ_REL);
763         return 1;
764     }
765 # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
766     /* This will work for all future Solaris versions. */
767     if (ret != NULL) {
768         *ret = atomic_add_64_nv(val, op);
769         return 1;
770     }
771 # endif
772     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
773         return 0;
774     *val += op;
775     *ret  = *val;
776 
777     if (!CRYPTO_THREAD_unlock(lock))
778         return 0;
779 
780     return 1;
781 }
782 
783 int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
784                       CRYPTO_RWLOCK *lock)
785 {
786 # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
787     if (__atomic_is_lock_free(sizeof(*val), val)) {
788         *ret = __atomic_and_fetch(val, op, __ATOMIC_ACQ_REL);
789         return 1;
790     }
791 # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
792     /* This will work for all future Solaris versions. */
793     if (ret != NULL) {
794         *ret = atomic_and_64_nv(val, op);
795         return 1;
796     }
797 # endif
798     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
799         return 0;
800     *val &= op;
801     *ret  = *val;
802 
803     if (!CRYPTO_THREAD_unlock(lock))
804         return 0;
805 
806     return 1;
807 }
808 
809 int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
810                      CRYPTO_RWLOCK *lock)
811 {
812 # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
813     if (__atomic_is_lock_free(sizeof(*val), val)) {
814         *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
815         return 1;
816     }
817 # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
818     /* This will work for all future Solaris versions. */
819     if (ret != NULL) {
820         *ret = atomic_or_64_nv(val, op);
821         return 1;
822     }
823 # endif
824     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
825         return 0;
826     *val |= op;
827     *ret  = *val;
828 
829     if (!CRYPTO_THREAD_unlock(lock))
830         return 0;
831 
832     return 1;
833 }
834 
835 int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
836 {
837 # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
838     if (__atomic_is_lock_free(sizeof(*val), val)) {
839         __atomic_load(val, ret, __ATOMIC_ACQUIRE);
840         return 1;
841     }
842 # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
843     /* This will work for all future Solaris versions. */
844     if (ret != NULL) {
845         *ret = atomic_or_64_nv(val, 0);
846         return 1;
847     }
848 # endif
849     if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
850         return 0;
851     *ret  = *val;
852     if (!CRYPTO_THREAD_unlock(lock))
853         return 0;
854 
855     return 1;
856 }
857 
858 int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
859 {
860 # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
861     if (__atomic_is_lock_free(sizeof(*dst), dst)) {
862         __atomic_store(dst, &val, __ATOMIC_RELEASE);
863         return 1;
864     }
865 # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
866     /* This will work for all future Solaris versions. */
867     if (dst != NULL) {
868         atomic_swap_64(dst, val);
869         return 1;
870     }
871 # endif
872     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
873         return 0;
874     *dst  = val;
875     if (!CRYPTO_THREAD_unlock(lock))
876         return 0;
877 
878     return 1;
879 }
880 
881 int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
882 {
883 # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
884     if (__atomic_is_lock_free(sizeof(*val), val)) {
885         __atomic_load(val, ret, __ATOMIC_ACQUIRE);
886         return 1;
887     }
888 # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
889     /* This will work for all future Solaris versions. */
890     if (ret != NULL) {
891         *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
892         return 1;
893     }
894 # endif
895     if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
896         return 0;
897     *ret  = *val;
898     if (!CRYPTO_THREAD_unlock(lock))
899         return 0;
900 
901     return 1;
902 }
903 
904 # ifndef FIPS_MODULE
905 int openssl_init_fork_handlers(void)
906 {
907     return 1;
908 }
909 # endif /* FIPS_MODULE */
910 
911 int openssl_get_fork_id(void)
912 {
913     return getpid();
914 }
915 #endif
916