xref: /freebsd/crypto/openssl/crypto/threads_pthread.c (revision 046c625e9382e17da953767b881aaa782fa73af8)
1 /*
2  * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
3  *
4  * Licensed under the Apache License 2.0 (the "License").  You may not use
5  * this file except in compliance with the License.  You can obtain a copy
6  * in the file LICENSE in the source distribution or at
7  * https://www.openssl.org/source/license.html
8  */
9 
10 /* We need to use the OPENSSL_fork_*() deprecated APIs */
11 #define OPENSSL_SUPPRESS_DEPRECATED
12 
13 #include <openssl/crypto.h>
14 #include <crypto/cryptlib.h>
15 #include "internal/cryptlib.h"
16 #include "internal/rcu.h"
17 #include "rcu_internal.h"
18 
19 #if defined(__clang__) && defined(__has_feature)
20 # if __has_feature(thread_sanitizer)
21 #  define __SANITIZE_THREAD__
22 # endif
23 #endif
24 
25 #if defined(__SANITIZE_THREAD__)
26 # include <sanitizer/tsan_interface.h>
27 # define TSAN_FAKE_UNLOCK(x)   __tsan_mutex_pre_unlock((x), 0); \
28 __tsan_mutex_post_unlock((x), 0)
29 
30 # define TSAN_FAKE_LOCK(x)  __tsan_mutex_pre_lock((x), 0); \
31 __tsan_mutex_post_lock((x), 0, 0)
32 #else
33 # define TSAN_FAKE_UNLOCK(x)
34 # define TSAN_FAKE_LOCK(x)
35 #endif
36 
37 #if defined(__sun)
38 # include <atomic.h>
39 #endif
40 
41 #if defined(__apple_build_version__) && __apple_build_version__ < 6000000
42 /*
43  * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE
44  * and __ATOMIC_ACQ_REL but expects only one parameter for __atomic_is_lock_free(),
45  * i.e. its signature is __atomic_is_lock_free(sizeof(_Atomic(T))) rather than
46  * the usual two.  All of this makes it impossible to use __atomic_is_lock_free here.
47  *
48  * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
49  */
50 # define BROKEN_CLANG_ATOMICS
51 #endif
52 
53 #if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)
54 
55 # if defined(OPENSSL_SYS_UNIX)
56 #  include <sys/types.h>
57 #  include <unistd.h>
58 # endif
59 
60 # include <assert.h>
61 
62 /*
63  * The Non-Stop KLT thread model currently seems broken in its rwlock
64  * implementation.
65  * Likewise, there is a problem with the glibc implementation on riscv.
66  */
67 # if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_) \
68                                          && !defined(__riscv)
69 #  define USE_RWLOCK
70 # endif
71 
72 /*
73  * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
74  * other compilers.
75  *
76  * Unfortunately, we can't do that with some "generic type", because there's no
77  * guarantee that the chosen generic type is large enough to cover all cases.
78  * Therefore, we implement fallbacks for each applicable type, with composed
79  * names that include the type they handle.
80  *
81  * (an anecdote: we previously tried to use |void *| as the generic type, with
82  * the thought that the pointer itself is the largest type.  However, this is
83  * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
84  *
85  * All applicable ATOMIC_ macros take the intended type as first parameter, so
86  * they can map to the correct fallback function.  In the GNU/clang case, that
87  * parameter is simply ignored.
88  */
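/*
 * Illustrative note (added commentary, not upstream text): with this scheme a
 * call such as
 *
 *     uint64_t v = ATOMIC_LOAD_N(uint64_t, &lock->qp_group[i].users,
 *                                __ATOMIC_ACQUIRE);
 *
 * expands to __atomic_load_n(&lock->qp_group[i].users, __ATOMIC_ACQUIRE) when
 * the GNU/clang builtins are usable, and to
 * fallback_atomic_load_n_uint64_t(&lock->qp_group[i].users) otherwise, with
 * the memory order argument simply dropped by the fallback.
 */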
89 
90 /*
91  * Internal types used with the ATOMIC_ macros, to make it possible to compose
92  * fallback function names.
93  */
94 typedef void *pvoid;
95 
96 # if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
97     && !defined(USE_ATOMIC_FALLBACKS)
98 #  define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
99 #  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
100 #  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
101 #  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
102 #  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
103 # else
104 static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
105 
106 #  define IMPL_fallback_atomic_load_n(t)                        \
107     static ossl_inline t fallback_atomic_load_n_##t(t *p)            \
108     {                                                           \
109         t ret;                                                  \
110                                                                 \
111         pthread_mutex_lock(&atomic_sim_lock);                   \
112         ret = *p;                                               \
113         pthread_mutex_unlock(&atomic_sim_lock);                 \
114         return ret;                                             \
115     }
116 IMPL_fallback_atomic_load_n(uint32_t)
117 IMPL_fallback_atomic_load_n(uint64_t)
118 IMPL_fallback_atomic_load_n(pvoid)
119 
120 #  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
121 
122 #  define IMPL_fallback_atomic_store_n(t)                       \
123     static ossl_inline t fallback_atomic_store_n_##t(t *p, t v)      \
124     {                                                           \
125         t ret;                                                  \
126                                                                 \
127         pthread_mutex_lock(&atomic_sim_lock);                   \
128         ret = *p;                                               \
129         *p = v;                                                 \
130         pthread_mutex_unlock(&atomic_sim_lock);                 \
131         return ret;                                             \
132     }
133 IMPL_fallback_atomic_store_n(uint32_t)
134 
135 #  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
136 
137 #  define IMPL_fallback_atomic_store(t)                         \
138     static ossl_inline void fallback_atomic_store_##t(t *p, t *v)    \
139     {                                                           \
140         pthread_mutex_lock(&atomic_sim_lock);                   \
141         *p = *v;                                                \
142         pthread_mutex_unlock(&atomic_sim_lock);                 \
143     }
144 IMPL_fallback_atomic_store(pvoid)
145 
146 #  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
147 
148 /*
149  * The fallbacks that follow don't need any per type implementation, as
150  * they are designed for uint64_t only.  If there comes a time when multiple
151  * types need to be covered, it's relatively easy to refactor them the same
152  * way as the fallbacks above.
153  */
154 
155 static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
156 {
157     uint64_t ret;
158 
159     pthread_mutex_lock(&atomic_sim_lock);
160     *p += v;
161     ret = *p;
162     pthread_mutex_unlock(&atomic_sim_lock);
163     return ret;
164 }
165 
166 #  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
167 
168 static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
169 {
170     uint64_t ret;
171 
172     pthread_mutex_lock(&atomic_sim_lock);
173     *p -= v;
174     ret = *p;
175     pthread_mutex_unlock(&atomic_sim_lock);
176     return ret;
177 }
178 
179 #  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
180 # endif
181 
182 /*
183  * This is the core of an rcu lock. It tracks the readers and writers for the
184  * current quiescence point for a given lock. users is the 64 bit counter
185  * of readers currently holding this qp.
186  *
187  */
188 struct rcu_qp {
189     uint64_t users;
190 };
191 
192 struct thread_qp {
193     struct rcu_qp *qp;
194     unsigned int depth;
195     CRYPTO_RCU_LOCK *lock;
196 };
197 
198 # define MAX_QPS 10
199 /*
200  * This is the per thread tracking data
201  * that is assigned to each thread participating
202  * in an rcu qp
203  *
204  * qp points to the qp that it last acquired
205  *
206  */
207 struct rcu_thr_data {
208     struct thread_qp thread_qps[MAX_QPS];
209 };
210 
211 /*
212  * This is the internal version of a CRYPTO_RCU_LOCK;
213  * it is cast from CRYPTO_RCU_LOCK.
214  */
215 struct rcu_lock_st {
216     /* Callbacks to call for next ossl_synchronize_rcu */
217     struct rcu_cb_item *cb_items;
218 
219     /* The context we are being created against */
220     OSSL_LIB_CTX *ctx;
221 
222     /* Array of quiescent points for synchronization */
223     struct rcu_qp *qp_group;
224 
225     /* rcu generation counter for in-order retirement */
226     uint32_t id_ctr;
227 
228     /* Number of elements in qp_group array */
229     uint32_t group_count;
230 
231     /* Index of the current qp in the qp_group array */
232     uint32_t reader_idx;
233 
234     /* value of the next id_ctr value to be retired */
235     uint32_t next_to_retire;
236 
237     /* index of the next free rcu_qp in the qp_group */
238     uint32_t current_alloc_idx;
239 
240     /* number of qp's in qp_group array currently being retired */
241     uint32_t writers_alloced;
242 
243     /* lock protecting write side operations */
244     pthread_mutex_t write_lock;
245 
246     /* lock protecting updates to writers_alloced/current_alloc_idx */
247     pthread_mutex_t alloc_lock;
248 
249     /* signal to wake threads waiting on alloc_lock */
250     pthread_cond_t alloc_signal;
251 
252     /* lock to enforce in-order retirement */
253     pthread_mutex_t prior_lock;
254 
255     /* signal to wake threads waiting on prior_lock */
256     pthread_cond_t prior_signal;
257 };
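/*
 * Informal sketch of how these structures interact (added commentary, not
 * upstream text): qp_group is treated as a ring of quiescence points.
 * reader_idx names the qp that new readers should pin, and each reader bumps
 * that qp's users count.  In ossl_synchronize_rcu the writer rotates
 * reader_idx to the next free qp (update_qp) and then waits until the old
 * qp's users count drains to zero, at which point every reader that could
 * still see the old data has finished and the qp can be recycled
 * (retire_qp).  prior_lock/prior_signal keep concurrent writers retiring
 * their qps in id_ctr order.
 */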
258 
259 /* Read side acquisition of the current qp */
260 static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
261 {
262     uint32_t qp_idx;
263 
264     /* get the current qp index */
265     for (;;) {
266         qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);
267 
268         /*
269          * Notes on use of __ATOMIC_ACQUIRE
270          * We need to ensure the following:
271          * 1) That subsequent operations aren't optimized by hoisting them above
272          * this operation.  Specifically, we don't want the below re-load of
273          * qp_idx to get optimized away
274          * 2) We want to ensure that any updating of reader_idx on the write side
275          * of the lock is flushed from a local cpu cache so that we see any
276          * updates prior to the load.  This is a non-issue on cache coherent
277          * systems like x86, but is relevant on other arches
278          */
279         ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
280                          __ATOMIC_ACQUIRE);
281 
282         /* if the idx hasn't changed, we're good, else try again */
283         if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,
284                                     __ATOMIC_ACQUIRE))
285             break;
286 
287         ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
288                          __ATOMIC_RELAXED);
289     }
290 
291     return &lock->qp_group[qp_idx];
292 }
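/*
 * Added commentary: the increment above can race with a writer rotating
 * reader_idx in update_qp.  A hypothetical interleaving:
 *
 *     reader: qp_idx = reader_idx                (sees index 0)
 *     writer: reader_idx = 1, starts draining qp 0
 *     reader: qp_group[0].users++                (hold on the old qp)
 *
 * The second load of reader_idx detects the rotation; the reader then drops
 * the hold it just took and retries against the new index, so it never
 * proceeds while pinned to a qp that is already being drained.
 */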
293 
294 static void ossl_rcu_free_local_data(void *arg)
295 {
296     OSSL_LIB_CTX *ctx = arg;
297     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
298     struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
299 
300     OPENSSL_free(data);
301     CRYPTO_THREAD_set_local(lkey, NULL);
302 }
303 
304 void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
305 {
306     struct rcu_thr_data *data;
307     int i, available_qp = -1;
308     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
309 
310     /*
311      * we're going to access the per-thread rcu tracking data here,
312      * so fetch it (creating it on first use below)
313      */
314     data = CRYPTO_THREAD_get_local(lkey);
315 
316     if (data == NULL) {
317         data = OPENSSL_zalloc(sizeof(*data));
318         OPENSSL_assert(data != NULL);
319         CRYPTO_THREAD_set_local(lkey, data);
320         ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
321     }
322 
323     for (i = 0; i < MAX_QPS; i++) {
324         if (data->thread_qps[i].qp == NULL && available_qp == -1)
325             available_qp = i;
326         /* If we have a hold on this lock already, we're good */
327         if (data->thread_qps[i].lock == lock) {
328             data->thread_qps[i].depth++;
329             return;
330         }
331     }
332 
333     /*
334      * if we get here, then we don't have a hold on this lock yet
335      */
336     assert(available_qp != -1);
337 
338     data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
339     data->thread_qps[available_qp].depth = 1;
340     data->thread_qps[available_qp].lock = lock;
341 }
342 
343 void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
344 {
345     int i;
346     CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
347     struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
348     uint64_t ret;
349 
350     assert(data != NULL);
351 
352     for (i = 0; i < MAX_QPS; i++) {
353         if (data->thread_qps[i].lock == lock) {
354             /*
355              * we have to use __ATOMIC_RELEASE here
356              * to ensure that all preceding read instructions complete
357              * before the decrement is visible to ossl_synchronize_rcu
358              */
359             data->thread_qps[i].depth--;
360             if (data->thread_qps[i].depth == 0) {
361                 ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
362                                        (uint64_t)1, __ATOMIC_RELEASE);
363                 OPENSSL_assert(ret != UINT64_MAX);
364                 data->thread_qps[i].qp = NULL;
365                 data->thread_qps[i].lock = NULL;
366             }
367             return;
368         }
369     }
370     /*
371      * If we get here, we're trying to unlock a lock that we never acquired -
372      * that's fatal.
373      */
374     assert(0);
375 }
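/*
 * Illustrative reader-side usage, assuming a shared pointer published with
 * ossl_rcu_assign_uptr elsewhere (sketch only, not part of this file;
 * struct config, shared_cfg and use() are hypothetical names):
 *
 *     struct config *cfg;
 *
 *     ossl_rcu_read_lock(lock);
 *     cfg = ossl_rcu_uptr_deref((void **)&shared_cfg);
 *     if (cfg != NULL)
 *         use(cfg);                  * safe until the matching unlock *
 *     ossl_rcu_read_unlock(lock);
 */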
376 
377 /*
378  * Write side allocation routine to get the current qp
379  * and replace it with a new one
380  */
381 static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
382 {
383     uint32_t current_idx;
384 
385     pthread_mutex_lock(&lock->alloc_lock);
386 
387     /*
388      * we need at least one qp to be available with one
389      * left over, so that readers can start working on
390      * one that isn't yet being waited on
391      */
392     while (lock->group_count - lock->writers_alloced < 2)
393         /* we have to wait for one to be free */
394         pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);
395 
396     current_idx = lock->current_alloc_idx;
397 
398     /* Allocate the qp */
399     lock->writers_alloced++;
400 
401     /* increment the allocation index */
402     lock->current_alloc_idx =
403         (lock->current_alloc_idx + 1) % lock->group_count;
404 
405     *curr_id = lock->id_ctr;
406     lock->id_ctr++;
407 
408     /*
409      * make the current state of everything visible by this release
410      * when get_hold_current_qp acquires the next qp
411      */
412     ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
413                    __ATOMIC_RELEASE);
414 
415     /*
416      * this should make sure that the new value of reader_idx is visible in
417      * get_hold_current_qp, directly after incrementing the users count
418      */
419     ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
420                      __ATOMIC_RELEASE);
421 
422     /* wake up any waiters */
423     pthread_cond_signal(&lock->alloc_signal);
424     pthread_mutex_unlock(&lock->alloc_lock);
425     return &lock->qp_group[current_idx];
426 }
427 
428 static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
429 {
430     pthread_mutex_lock(&lock->alloc_lock);
431     lock->writers_alloced--;
432     pthread_cond_signal(&lock->alloc_signal);
433     pthread_mutex_unlock(&lock->alloc_lock);
434 }
435 
436 static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
437                                             uint32_t count)
438 {
439     struct rcu_qp *new =
440         OPENSSL_zalloc(sizeof(*new) * count);
441 
442     lock->group_count = count;
443     return new;
444 }
445 
446 void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
447 {
448     pthread_mutex_lock(&lock->write_lock);
449     TSAN_FAKE_UNLOCK(&lock->write_lock);
450 }
451 
452 void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
453 {
454     TSAN_FAKE_LOCK(&lock->write_lock);
455     pthread_mutex_unlock(&lock->write_lock);
456 }
457 
458 void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
459 {
460     struct rcu_qp *qp;
461     uint64_t count;
462     uint32_t curr_id;
463     struct rcu_cb_item *cb_items, *tmpcb;
464 
465     pthread_mutex_lock(&lock->write_lock);
466     cb_items = lock->cb_items;
467     lock->cb_items = NULL;
468     pthread_mutex_unlock(&lock->write_lock);
469 
470     qp = update_qp(lock, &curr_id);
471 
472     /* retire in order */
473     pthread_mutex_lock(&lock->prior_lock);
474     while (lock->next_to_retire != curr_id)
475         pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
476 
477     /*
478      * Wait for the reader count to reach zero.
479      * Note the use of __ATOMIC_ACQUIRE here to ensure that any
480      * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
481      * is visible prior to our read.
482      * However, this is likely only necessary to silence a tsan warning,
483      * because the read side should not do any write operation
484      * outside the atomic itself.
485      */
486     do {
487         count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
488     } while (count != (uint64_t)0);
489 
490     lock->next_to_retire++;
491     pthread_cond_broadcast(&lock->prior_signal);
492     pthread_mutex_unlock(&lock->prior_lock);
493 
494     retire_qp(lock, qp);
495 
496     /* handle any callbacks that we have */
497     while (cb_items != NULL) {
498         tmpcb = cb_items;
499         cb_items = cb_items->next;
500         tmpcb->fn(tmpcb->data);
501         OPENSSL_free(tmpcb);
502     }
503 }
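/*
 * Illustrative writer-side update pattern (sketch only, not upstream code;
 * shared_cfg, newcfg and old are hypothetical names):
 *
 *     ossl_rcu_write_lock(lock);
 *     old = shared_cfg;
 *     ossl_rcu_assign_uptr((void **)&shared_cfg, (void **)&newcfg);
 *     ossl_rcu_write_unlock(lock);
 *
 *     ossl_synchronize_rcu(lock);    * wait out all readers of |old| *
 *     OPENSSL_free(old);             * now safe to reclaim *
 */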
504 
505 /*
506  * Note: This call assumes it is made under the protection of
507  * ossl_rcu_write_lock.
508  */
509 int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
510 {
511     struct rcu_cb_item *new =
512         OPENSSL_zalloc(sizeof(*new));
513 
514     if (new == NULL)
515         return 0;
516 
517     new->data = data;
518     new->fn = cb;
519 
520     new->next = lock->cb_items;
521     lock->cb_items = new;
522 
523     return 1;
524 }
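/*
 * Illustrative alternative to blocking on ossl_synchronize_rcu directly:
 * register a reclamation callback while the write lock is held (sketch
 * only; free_old_cfg, old, shared_cfg and newcfg are hypothetical):
 *
 *     ossl_rcu_write_lock(lock);
 *     ossl_rcu_assign_uptr((void **)&shared_cfg, (void **)&newcfg);
 *     ok = ossl_rcu_call(lock, free_old_cfg, old);   * 0 on malloc failure *
 *     ossl_rcu_write_unlock(lock);
 *
 * The callback runs from the next ossl_synchronize_rcu on this lock, after
 * the grace period has elapsed.
 */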
525 
526 void *ossl_rcu_uptr_deref(void **p)
527 {
528     return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
529 }
530 
531 void ossl_rcu_assign_uptr(void **p, void **v)
532 {
533     ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
534 }
535 
536 CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
537 {
538     struct rcu_lock_st *new;
539 
540     /*
541      * We need a minimum of 2 qp's
542      */
543     if (num_writers < 2)
544         num_writers = 2;
545 
546     ctx = ossl_lib_ctx_get_concrete(ctx);
547     if (ctx == NULL)
548         return NULL;
549 
550     new = OPENSSL_zalloc(sizeof(*new));
551     if (new == NULL)
552         return NULL;
553 
554     new->ctx = ctx;
555     pthread_mutex_init(&new->write_lock, NULL);
556     pthread_mutex_init(&new->prior_lock, NULL);
557     pthread_mutex_init(&new->alloc_lock, NULL);
558     pthread_cond_init(&new->prior_signal, NULL);
559     pthread_cond_init(&new->alloc_signal, NULL);
560 
561     new->qp_group = allocate_new_qp_group(new, num_writers);
562     if (new->qp_group == NULL) {
563         OPENSSL_free(new);
564         new = NULL;
565     }
566 
567     return new;
568 }
569 
570 void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
571 {
572     struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;
573 
574     if (lock == NULL)
575         return;
576 
577     /* make sure we're synchronized */
578     ossl_synchronize_rcu(rlock);
579 
580     OPENSSL_free(rlock->qp_group);
581     /* There should only be a single qp left now */
582     OPENSSL_free(rlock);
583 }
584 
585 CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
586 {
587 # ifdef USE_RWLOCK
588     CRYPTO_RWLOCK *lock;
589 
590     if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
591         /* Don't set error, to avoid recursion blowup. */
592         return NULL;
593 
594     if (pthread_rwlock_init(lock, NULL) != 0) {
595         OPENSSL_free(lock);
596         return NULL;
597     }
598 # else
599     pthread_mutexattr_t attr;
600     CRYPTO_RWLOCK *lock;
601 
602     if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
603         /* Don't set error, to avoid recursion blowup. */
604         return NULL;
605 
606     /*
607      * We don't use recursive mutexes, but try to catch errors if we do.
608      */
609     pthread_mutexattr_init(&attr);
610 #  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
611 #   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
612     pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
613 #   endif
614 #  else
615     /* The SPT Thread Library does not define MUTEX attributes. */
616 #  endif
617 
618     if (pthread_mutex_init(lock, &attr) != 0) {
619         pthread_mutexattr_destroy(&attr);
620         OPENSSL_free(lock);
621         return NULL;
622     }
623 
624     pthread_mutexattr_destroy(&attr);
625 # endif
626 
627     return lock;
628 }
629 
630 __owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
631 {
632 # ifdef USE_RWLOCK
633     if (!ossl_assert(pthread_rwlock_rdlock(lock) == 0))
634         return 0;
635 # else
636     if (pthread_mutex_lock(lock) != 0) {
637         assert(errno != EDEADLK && errno != EBUSY);
638         return 0;
639     }
640 # endif
641 
642     return 1;
643 }
644 
645 __owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
646 {
647 # ifdef USE_RWLOCK
648     if (!ossl_assert(pthread_rwlock_wrlock(lock) == 0))
649         return 0;
650 # else
651     if (pthread_mutex_lock(lock) != 0) {
652         assert(errno != EDEADLK && errno != EBUSY);
653         return 0;
654     }
655 # endif
656 
657     return 1;
658 }
659 
660 int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
661 {
662 # ifdef USE_RWLOCK
663     if (pthread_rwlock_unlock(lock) != 0)
664         return 0;
665 # else
666     if (pthread_mutex_unlock(lock) != 0) {
667         assert(errno != EPERM);
668         return 0;
669     }
670 # endif
671 
672     return 1;
673 }
674 
675 void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
676 {
677     if (lock == NULL)
678         return;
679 
680 # ifdef USE_RWLOCK
681     pthread_rwlock_destroy(lock);
682 # else
683     pthread_mutex_destroy(lock);
684 # endif
685     OPENSSL_free(lock);
686 
687     return;
688 }
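/*
 * Illustrative usage of the CRYPTO_THREAD_* lock API implemented above
 * (sketch only; counter is a hypothetical shared variable):
 *
 *     CRYPTO_RWLOCK *l = CRYPTO_THREAD_lock_new();
 *
 *     if (l != NULL && CRYPTO_THREAD_write_lock(l)) {
 *         counter++;
 *         CRYPTO_THREAD_unlock(l);
 *     }
 *     CRYPTO_THREAD_lock_free(l);
 *
 * CRYPTO_THREAD_read_lock() takes the shared side when USE_RWLOCK is in
 * effect; with the mutex fallback both lock flavours are exclusive.
 */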
689 
690 int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
691 {
692     if (pthread_once(once, init) != 0)
693         return 0;
694 
695     return 1;
696 }
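/*
 * Illustrative once-initialisation pattern (sketch only; do_init and the
 * once variable are hypothetical):
 *
 *     static CRYPTO_ONCE once = CRYPTO_ONCE_STATIC_INIT;
 *
 *     static void do_init(void)
 *     {
 *         * one-time setup goes here *
 *     }
 *
 *     if (!CRYPTO_THREAD_run_once(&once, do_init))
 *         return 0;   * pthread_once failed *
 */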
697 
698 int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
699 {
700     if (pthread_key_create(key, cleanup) != 0)
701         return 0;
702 
703     return 1;
704 }
705 
706 void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
707 {
708     return pthread_getspecific(*key);
709 }
710 
711 int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
712 {
713     if (pthread_setspecific(*key, val) != 0)
714         return 0;
715 
716     return 1;
717 }
718 
719 int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
720 {
721     if (pthread_key_delete(*key) != 0)
722         return 0;
723 
724     return 1;
725 }
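/*
 * Illustrative thread-local key usage (sketch only; struct state and
 * free_state are hypothetical):
 *
 *     CRYPTO_THREAD_LOCAL key;
 *     struct state *st;
 *
 *     if (!CRYPTO_THREAD_init_local(&key, free_state))
 *         return 0;
 *     if (!CRYPTO_THREAD_set_local(&key, OPENSSL_zalloc(sizeof(*st))))
 *         return 0;
 *     st = CRYPTO_THREAD_get_local(&key);
 *     CRYPTO_THREAD_cleanup_local(&key);
 *
 * As with pthread keys, the cleanup function only runs at thread exit for
 * values that are still non-NULL.
 */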
726 
727 CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
728 {
729     return pthread_self();
730 }
731 
732 int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
733 {
734     return pthread_equal(a, b);
735 }
736 
737 int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
738 {
739 # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
740     if (__atomic_is_lock_free(sizeof(*val), val)) {
741         *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
742         return 1;
743     }
744 # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
745     /* This will work for all future Solaris versions. */
746     if (ret != NULL) {
747         *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
748         return 1;
749     }
750 # endif
751     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
752         return 0;
753 
754     *val += amount;
755     *ret  = *val;
756 
757     if (!CRYPTO_THREAD_unlock(lock))
758         return 0;
759 
760     return 1;
761 }
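/*
 * Illustrative use of CRYPTO_atomic_add (sketch only; refcount and
 * refcount_lock are hypothetical):
 *
 *     int newcount;
 *
 *     if (!CRYPTO_atomic_add(&refcount, 1, &newcount, refcount_lock))
 *         return 0;
 *
 * On toolchains with lock-free builtins the |lock| argument is unused; it is
 * only taken (as a write lock) on the mutex-protected fallback path, so
 * callers should still supply a valid lock to stay portable.
 */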
762 
763 int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
764                         CRYPTO_RWLOCK *lock)
765 {
766 # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
767     if (__atomic_is_lock_free(sizeof(*val), val)) {
768         *ret = __atomic_add_fetch(val, op, __ATOMIC_ACQ_REL);
769         return 1;
770     }
771 # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
772     /* This will work for all future Solaris versions. */
773     if (ret != NULL) {
774         *ret = atomic_add_64_nv(val, op);
775         return 1;
776     }
777 # endif
778     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
779         return 0;
780     *val += op;
781     *ret  = *val;
782 
783     if (!CRYPTO_THREAD_unlock(lock))
784         return 0;
785 
786     return 1;
787 }
788 
789 int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
790                       CRYPTO_RWLOCK *lock)
791 {
792 # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
793     if (__atomic_is_lock_free(sizeof(*val), val)) {
794         *ret = __atomic_and_fetch(val, op, __ATOMIC_ACQ_REL);
795         return 1;
796     }
797 # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
798     /* This will work for all future Solaris versions. */
799     if (ret != NULL) {
800         *ret = atomic_and_64_nv(val, op);
801         return 1;
802     }
803 # endif
804     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
805         return 0;
806     *val &= op;
807     *ret  = *val;
808 
809     if (!CRYPTO_THREAD_unlock(lock))
810         return 0;
811 
812     return 1;
813 }
814 
815 int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
816                      CRYPTO_RWLOCK *lock)
817 {
818 # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
819     if (__atomic_is_lock_free(sizeof(*val), val)) {
820         *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
821         return 1;
822     }
823 # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
824     /* This will work for all future Solaris versions. */
825     if (ret != NULL) {
826         *ret = atomic_or_64_nv(val, op);
827         return 1;
828     }
829 # endif
830     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
831         return 0;
832     *val |= op;
833     *ret  = *val;
834 
835     if (!CRYPTO_THREAD_unlock(lock))
836         return 0;
837 
838     return 1;
839 }
840 
841 int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
842 {
843 # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
844     if (__atomic_is_lock_free(sizeof(*val), val)) {
845         __atomic_load(val, ret, __ATOMIC_ACQUIRE);
846         return 1;
847     }
848 # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
849     /* This will work for all future Solaris versions. */
850     if (ret != NULL) {
851         *ret = atomic_or_64_nv(val, 0);
852         return 1;
853     }
854 # endif
855     if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
856         return 0;
857     *ret  = *val;
858     if (!CRYPTO_THREAD_unlock(lock))
859         return 0;
860 
861     return 1;
862 }
863 
864 int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
865 {
866 # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
867     if (__atomic_is_lock_free(sizeof(*dst), dst)) {
868         __atomic_store(dst, &val, __ATOMIC_RELEASE);
869         return 1;
870     }
871 # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
872     /* This will work for all future Solaris versions. */
873     if (dst != NULL) {
874         atomic_swap_64(dst, val);
875         return 1;
876     }
877 # endif
878     if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
879         return 0;
880     *dst  = val;
881     if (!CRYPTO_THREAD_unlock(lock))
882         return 0;
883 
884     return 1;
885 }
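/*
 * Illustrative pairing of CRYPTO_atomic_store and CRYPTO_atomic_load for a
 * 64-bit shared value (sketch only; gen and gen_lock are hypothetical):
 *
 *     uint64_t seen;
 *
 *     CRYPTO_atomic_store(&gen, 42, gen_lock);       * publisher *
 *     CRYPTO_atomic_load(&gen, &seen, gen_lock);     * consumer *
 *
 * The store uses release and the load uses acquire ordering on the builtin
 * path, so a consumer that observes the new value also observes any writes
 * made before the store.
 */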
886 
887 int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
888 {
889 # if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
890     if (__atomic_is_lock_free(sizeof(*val), val)) {
891         __atomic_load(val, ret, __ATOMIC_ACQUIRE);
892         return 1;
893     }
894 # elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
895     /* This will work for all future Solaris versions. */
896     if (ret != NULL) {
897         *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
898         return 1;
899     }
900 # endif
901     if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
902         return 0;
903     *ret  = *val;
904     if (!CRYPTO_THREAD_unlock(lock))
905         return 0;
906 
907     return 1;
908 }
909 
910 # ifndef FIPS_MODULE
911 int openssl_init_fork_handlers(void)
912 {
913     return 1;
914 }
915 # endif /* FIPS_MODULE */
916 
917 int openssl_get_fork_id(void)
918 {
919     return getpid();
920 }
921 #endif
922