xref: /freebsd/crypto/openssl/crypto/threads_win.c (revision f25b8c9fb4f58cf61adb47d7570abe7caa6d385d)
/*
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#if defined(_WIN32)
#include <windows.h>
#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x600
#define USE_RWLOCK
#endif
#endif
#include <assert.h>

/*
 * VC++ 2010 (_MSC_VER <= 1600) and earlier x86 compilers do not have an
 * inline implementation of InterlockedOr64 for 32bit targets and will fail
 * to run on Windows XP 32bit.
 * https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedor-intrinsic-functions#requirements
 * To work around this problem, we implement a manual locking mechanism for
 * those compilers and for 32bit MinGW, which has the same limitation.
 */

#if ((defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER <= 1600) || (defined(__MINGW32__) && !defined(__MINGW64__)))
#define NO_INTERLOCKEDOR64
#endif

#include <openssl/crypto.h>
#include <crypto/cryptlib.h>
#include "internal/common.h"
#include "internal/thread_arch.h"
#include "internal/rcu.h"
#include "rcu_internal.h"

#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && defined(OPENSSL_SYS_WINDOWS)

#ifdef USE_RWLOCK
typedef struct {
    SRWLOCK lock;
    int exclusive;
} CRYPTO_win_rwlock;
#endif

/*
 * This defines a quiescent point (qp): the barrier beyond which a writer
 * must wait before freeing data that was atomically updated.
 */
struct rcu_qp {
    volatile uint64_t users;
};

struct thread_qp {
    struct rcu_qp *qp;
    unsigned int depth;
    CRYPTO_RCU_LOCK *lock;
};

#define MAX_QPS 10
/*
 * This is the per-thread tracking data assigned to each thread
 * participating in an rcu qp; each entry's qp member points to the qp
 * that the thread last acquired.
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};

/*
 * This is the internal version of a CRYPTO_RCU_LOCK;
 * it is cast from CRYPTO_RCU_LOCK.
 */
struct rcu_lock_st {
    /* Callbacks to call for next ossl_synchronize_rcu */
    struct rcu_cb_item *cb_items;

    /* The context we are being created against */
    OSSL_LIB_CTX *ctx;

    /* Array of quiescent points for synchronization */
    struct rcu_qp *qp_group;

    /* rcu generation counter for in-order retirement */
    uint32_t id_ctr;

    /* Number of elements in qp_group array */
    uint32_t group_count;

    /* Index of the current qp in the qp_group array */
    uint32_t reader_idx;

    /* value of the next id_ctr to be retired */
    uint32_t next_to_retire;

    /* index of the next free rcu_qp in the qp_group */
    uint32_t current_alloc_idx;

    /* number of qps in qp_group array currently being retired */
    uint32_t writers_alloced;

    /* lock protecting write side operations */
    CRYPTO_MUTEX *write_lock;

    /* lock protecting updates to writers_alloced/current_alloc_idx */
    CRYPTO_MUTEX *alloc_lock;

    /* signal to wake threads waiting on alloc_lock */
    CRYPTO_CONDVAR *alloc_signal;

    /* lock to enforce in-order retirement */
    CRYPTO_MUTEX *prior_lock;

    /* signal to wake threads waiting on prior_lock */
    CRYPTO_CONDVAR *prior_signal;

    /* lock used with NO_INTERLOCKEDOR64: VS2010 x86 */
    CRYPTO_RWLOCK *rw_lock;
};

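/*
 * Lifecycle sketch (editorial summary of the functions below): a writer
 * calls update_qp() to take the current qp out of service and point
 * readers at a fresh one, waits in ossl_synchronize_rcu() for the retired
 * qp's users count to drain to zero, and finally returns it to the pool
 * via retire_qp().
 */
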
static struct rcu_qp *allocate_new_qp_group(struct rcu_lock_st *lock,
                                            uint32_t count)
{
    struct rcu_qp *new = OPENSSL_zalloc(sizeof(*new) * count);

    lock->group_count = count;
    return new;
}

CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
{
    struct rcu_lock_st *new;

    /*
     * We need a minimum of 2 qps
     */
    if (num_writers < 2)
        num_writers = 2;

    ctx = ossl_lib_ctx_get_concrete(ctx);
    if (ctx == NULL)
        return NULL;

    new = OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return NULL;

    new->ctx = ctx;
    new->rw_lock = CRYPTO_THREAD_lock_new();
    new->write_lock = ossl_crypto_mutex_new();
    new->alloc_signal = ossl_crypto_condvar_new();
    new->prior_signal = ossl_crypto_condvar_new();
    new->alloc_lock = ossl_crypto_mutex_new();
    new->prior_lock = ossl_crypto_mutex_new();
    new->qp_group = allocate_new_qp_group(new, num_writers);
    if (new->qp_group == NULL
        || new->alloc_signal == NULL
        || new->prior_signal == NULL
        || new->write_lock == NULL
        || new->alloc_lock == NULL
        || new->prior_lock == NULL
        || new->rw_lock == NULL) {
        CRYPTO_THREAD_lock_free(new->rw_lock);
        OPENSSL_free(new->qp_group);
        ossl_crypto_condvar_free(&new->alloc_signal);
        ossl_crypto_condvar_free(&new->prior_signal);
        ossl_crypto_mutex_free(&new->alloc_lock);
        ossl_crypto_mutex_free(&new->prior_lock);
        ossl_crypto_mutex_free(&new->write_lock);
        OPENSSL_free(new);
        new = NULL;
    }

    return new;
}

void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
{
    CRYPTO_THREAD_lock_free(lock->rw_lock);
    OPENSSL_free(lock->qp_group);
    ossl_crypto_condvar_free(&lock->alloc_signal);
    ossl_crypto_condvar_free(&lock->prior_signal);
    ossl_crypto_mutex_free(&lock->alloc_lock);
    ossl_crypto_mutex_free(&lock->prior_lock);
    ossl_crypto_mutex_free(&lock->write_lock);
    OPENSSL_free(lock);
}

/* Read side acquisition of the current qp */
static ossl_inline struct rcu_qp *get_hold_current_qp(CRYPTO_RCU_LOCK *lock)
{
    uint32_t qp_idx;
    uint32_t tmp;
    uint64_t tmp64;

    /* get the current qp index */
    for (;;) {
        CRYPTO_atomic_load_int((int *)&lock->reader_idx, (int *)&qp_idx,
                               lock->rw_lock);
        CRYPTO_atomic_add64(&lock->qp_group[qp_idx].users, (uint64_t)1, &tmp64,
                            lock->rw_lock);
        CRYPTO_atomic_load_int((int *)&lock->reader_idx, (int *)&tmp,
                               lock->rw_lock);
        if (qp_idx == tmp)
            break;
        CRYPTO_atomic_add64(&lock->qp_group[qp_idx].users, (uint64_t)-1, &tmp64,
                            lock->rw_lock);
    }

    return &lock->qp_group[qp_idx];
}
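
/*
 * Note on the retry loop above: between loading reader_idx and bumping the
 * users count, a writer may have swapped reader_idx to a new qp. When the
 * recheck detects that, the increment landed on a qp that is being retired,
 * so we back it out and try again; once both loads agree, the writer's
 * drain loop is guaranteed to observe our hold.
 */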

static void ossl_rcu_free_local_data(void *arg)
{
    OSSL_LIB_CTX *ctx = arg;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);

    OPENSSL_free(data);
    CRYPTO_THREAD_set_local(lkey, NULL);
}

void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data;
    int i;
    int available_qp = -1;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);

    /* fetch this thread's rcu tracking data, allocating it on first use */
    data = CRYPTO_THREAD_get_local(lkey);

    if (data == NULL) {
        data = OPENSSL_zalloc(sizeof(*data));
        OPENSSL_assert(data != NULL);
        CRYPTO_THREAD_set_local(lkey, data);
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
    }

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
            available_qp = i;
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock)
            return;
    }

    /*
     * if we get here, then we don't have a hold on this lock yet
     */
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;
}
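
/*
 * Usage sketch (editorial, not part of this file): a typical read-side
 * critical section. ossl_rcu_deref() is assumed to be the wrapper around
 * ossl_rcu_uptr_deref() declared in internal/rcu.h; struct foo and
 * shared_ptr are illustrative names for data that writers replace with
 * ossl_rcu_assign_ptr().
 *
 *     struct foo *p;
 *
 *     ossl_rcu_read_lock(lock);
 *     p = ossl_rcu_deref(&shared_ptr);
 *     ... read through p; readers never block on writers ...
 *     ossl_rcu_read_unlock(lock);
 */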

void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    ossl_crypto_mutex_lock(lock->write_lock);
}

void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    ossl_crypto_mutex_unlock(lock->write_lock);
}

void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
    int i;
    LONG64 ret;

    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                CRYPTO_atomic_add64(&data->thread_qps[i].qp->users,
                                    (uint64_t)-1, (uint64_t *)&ret,
                                    lock->rw_lock);
                OPENSSL_assert(ret >= 0);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }
}

/*
 * Write side allocation routine to get the current qp
 * and replace it with a new one
 */
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
{
    uint32_t current_idx;
    uint32_t tmp;

    ossl_crypto_mutex_lock(lock->alloc_lock);
    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        /* we have to wait for one to be free */
        ossl_crypto_condvar_wait(lock->alloc_signal, lock->alloc_lock);

    current_idx = lock->current_alloc_idx;

    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx = (lock->current_alloc_idx + 1) % lock->group_count;

    /* get and insert a new id */
    *curr_id = lock->id_ctr;
    lock->id_ctr++;

    /*
     * point readers at the new qp, so the one we are about to wait on
     * stops gaining new users
     */
    tmp = lock->current_alloc_idx;
#if (defined(NO_INTERLOCKEDOR64))
    CRYPTO_THREAD_write_lock(lock->rw_lock);
    lock->reader_idx = tmp;
    CRYPTO_THREAD_unlock(lock->rw_lock);
#else
    InterlockedExchange((LONG volatile *)&lock->reader_idx, tmp);
#endif

    /* wake up any waiters */
    ossl_crypto_condvar_broadcast(lock->alloc_signal);
    ossl_crypto_mutex_unlock(lock->alloc_lock);
    return &lock->qp_group[current_idx];
}

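/*
 * Editorial note: the InterlockedExchange in update_qp() above is a full
 * memory barrier. Once it completes, new readers settle on the fresh qp,
 * and any reader that had already bumped the old qp's users count either
 * keeps its hold (which the drain loop in ossl_synchronize_rcu() will
 * observe) or notices the index change and backs the count out, so no
 * reader can be missed.
 */
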
static void retire_qp(CRYPTO_RCU_LOCK *lock,
                      struct rcu_qp *qp)
{
    ossl_crypto_mutex_lock(lock->alloc_lock);
    lock->writers_alloced--;
    ossl_crypto_condvar_broadcast(lock->alloc_signal);
    ossl_crypto_mutex_unlock(lock->alloc_lock);
}

void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    uint64_t count;
    uint32_t curr_id;
    struct rcu_cb_item *cb_items, *tmpcb;

    /* before we do anything else, let's grab the cb list */
    ossl_crypto_mutex_lock(lock->write_lock);
    cb_items = lock->cb_items;
    lock->cb_items = NULL;
    ossl_crypto_mutex_unlock(lock->write_lock);

    qp = update_qp(lock, &curr_id);

    /* retire in order */
    ossl_crypto_mutex_lock(lock->prior_lock);
    while (lock->next_to_retire != curr_id)
        ossl_crypto_condvar_wait(lock->prior_signal, lock->prior_lock);

    /* wait for the reader count to reach zero */
    do {
        CRYPTO_atomic_load(&qp->users, &count, lock->rw_lock);
    } while (count != (uint64_t)0);

    lock->next_to_retire++;
    ossl_crypto_condvar_broadcast(lock->prior_signal);
    ossl_crypto_mutex_unlock(lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }
}

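/*
 * Usage sketch (editorial, not part of this file): a typical write-side
 * update followed by a synchronize-and-free. ossl_rcu_assign_ptr() is
 * assumed to be the wrapper around ossl_rcu_assign_uptr() declared in
 * internal/rcu.h; shared_ptr, new and old are illustrative names.
 *
 *     ossl_rcu_write_lock(lock);
 *     old = shared_ptr;
 *     ossl_rcu_assign_ptr(&shared_ptr, &new);
 *     ossl_rcu_write_unlock(lock);
 *
 *     // wait until no reader can still hold a reference to old
 *     ossl_synchronize_rcu(lock);
 *     OPENSSL_free(old);
 */
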
/*
 * Note: must be called under the protection of ossl_rcu_write_lock()
 */
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
{
    struct rcu_cb_item *new;

    new = OPENSSL_zalloc(sizeof(struct rcu_cb_item));
    if (new == NULL)
        return 0;
    new->data = data;
    new->fn = cb;

    new->next = lock->cb_items;
    lock->cb_items = new;

    return 1;
}

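/*
 * Usage sketch (editorial, not part of this file): deferring the free to
 * the next ossl_synchronize_rcu() call instead of blocking now. The
 * callback free_old_data() is a hypothetical helper matching rcu_cb_fn.
 *
 *     static void free_old_data(void *data)
 *     {
 *         OPENSSL_free(data);
 *     }
 *
 *     ossl_rcu_write_lock(lock);
 *     old = shared_ptr;
 *     ossl_rcu_assign_ptr(&shared_ptr, &new);
 *     ossl_rcu_call(lock, free_old_data, old);
 *     ossl_rcu_write_unlock(lock);
 */
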
void *ossl_rcu_uptr_deref(void **p)
{
    return (void *)*p;
}

void ossl_rcu_assign_uptr(void **p, void **v)
{
    InterlockedExchangePointer((void *volatile *)p, (void *)*v);
}

CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
    CRYPTO_RWLOCK *lock;
#ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock;

    if ((lock = OPENSSL_zalloc(sizeof(CRYPTO_win_rwlock))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;
    rwlock = lock;
    InitializeSRWLock(&rwlock->lock);
#else
    if ((lock = OPENSSL_zalloc(sizeof(CRITICAL_SECTION))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

#if !defined(_WIN32_WCE)
    /* 0x400 is the spin count value suggested in the documentation */
    if (!InitializeCriticalSectionAndSpinCount(lock, 0x400)) {
        OPENSSL_free(lock);
        return NULL;
    }
#else
    InitializeCriticalSection(lock);
#endif
#endif

    return lock;
}

__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
#ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    AcquireSRWLockShared(&rwlock->lock);
#else
    EnterCriticalSection(lock);
#endif
    return 1;
}

__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
#ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    AcquireSRWLockExclusive(&rwlock->lock);
    rwlock->exclusive = 1;
#else
    EnterCriticalSection(lock);
#endif
    return 1;
}

int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
#ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    if (rwlock->exclusive) {
        rwlock->exclusive = 0;
        ReleaseSRWLockExclusive(&rwlock->lock);
    } else {
        ReleaseSRWLockShared(&rwlock->lock);
    }
#else
    LeaveCriticalSection(lock);
#endif
    return 1;
}

void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;

#ifndef USE_RWLOCK
    DeleteCriticalSection(lock);
#endif
    OPENSSL_free(lock);
}

#define ONCE_UNINITED 0
#define ONCE_ININIT 1
#define ONCE_DONE 2

/*
 * We don't use InitOnceExecuteOnce because that isn't available in WinXP,
 * which we still have to support.
 */
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    LONG volatile *lock = (LONG *)once;
    LONG result;

    if (*lock == ONCE_DONE)
        return 1;

    do {
        result = InterlockedCompareExchange(lock, ONCE_ININIT, ONCE_UNINITED);
        if (result == ONCE_UNINITED) {
            init();
            *lock = ONCE_DONE;
            return 1;
        }
    } while (result == ONCE_ININIT);

    return (*lock == ONCE_DONE);
}

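/*
 * Usage sketch (editorial, not part of this file): guarding one-time
 * initialisation; do_init is an illustrative name.
 *
 *     static CRYPTO_ONCE once = CRYPTO_ONCE_STATIC_INIT;
 *
 *     static void do_init(void)
 *     {
 *         ... runs exactly once across all threads ...
 *     }
 *
 *     if (!CRYPTO_THREAD_run_once(&once, do_init))
 *         return 0;
 */
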
int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
#ifndef FIPS_MODULE
    if (!ossl_init_thread())
        return 0;
#endif

    *key = TlsAlloc();
    if (*key == TLS_OUT_OF_INDEXES)
        return 0;

    return 1;
}

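/*
 * Usage sketch (editorial, not part of this file): the key lifecycle these
 * wrappers provide over TlsAlloc()/TlsSetValue()/TlsGetValue()/TlsFree().
 * Note that the cleanup argument is not used by this Windows implementation;
 * mydata is an illustrative name.
 *
 *     CRYPTO_THREAD_LOCAL key;
 *
 *     if (!CRYPTO_THREAD_init_local(&key, NULL))
 *         return 0;
 *     CRYPTO_THREAD_set_local(&key, mydata);
 *     mydata = CRYPTO_THREAD_get_local(&key);
 *     CRYPTO_THREAD_cleanup_local(&key);
 */
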
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    DWORD last_error;
    void *ret;

    /*
     * TlsGetValue clears the last error even on success, so that callers may
     * distinguish it successfully returning NULL or failing. It is documented
     * to never fail if the argument is a valid index from TlsAlloc, so we do
     * not need to handle this.
     *
     * However, this error-mangling behavior interferes with the caller's use of
     * GetLastError. In particular SSL_get_error queries the error queue to
     * determine whether the caller should look at the OS's errors. To avoid
     * destroying state, save and restore the Windows error.
     *
     * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686812(v=vs.85).aspx
     */
    last_error = GetLastError();
    ret = TlsGetValue(*key);
    SetLastError(last_error);
    return ret;
}

int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
    if (TlsSetValue(*key, val) == 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
    if (TlsFree(*key) == 0)
        return 0;

    return 1;
}

CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return GetCurrentThreadId();
}

int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
    return (a == b);
}

int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val += amount;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    *ret = (int)InterlockedExchangeAdd((LONG volatile *)val, (LONG)amount)
        + amount;
    return 1;
#endif
}

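/*
 * Usage sketch (editorial, not part of this file): atomic reference
 * counting; obj is an illustrative structure with an int refs field and
 * an associated CRYPTO_RWLOCK that is only consulted on
 * NO_INTERLOCKEDOR64 builds.
 *
 *     int refcnt;
 *
 *     if (!CRYPTO_atomic_add(&obj->refs, 1, &refcnt, obj->lock))
 *         return 0;
 */
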
int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
                        CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val += op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    *ret = (uint64_t)InterlockedAdd64((LONG64 volatile *)val, (LONG64)op);
    return 1;
#endif
}

int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
                      CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val &= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    *ret = (uint64_t)InterlockedAnd64((LONG64 volatile *)val, (LONG64)op) & op;
    return 1;
#endif
}

int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, (LONG64)op) | op;
    return 1;
#endif
}

int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, 0);
    return 1;
#endif
}

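/*
 * Editorial note: Windows has no plain interlocked 64-bit load, so
 * CRYPTO_atomic_load() above uses InterlockedOr64(val, 0): OR-ing with 0
 * leaves the value unchanged but performs the read atomically with a full
 * barrier. CRYPTO_atomic_load_int() below applies the same idiom with
 * InterlockedOr().
 */
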
int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    /* a store mutates *dst, so it needs the exclusive (write) lock */
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *dst = val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    InterlockedExchange64((LONG64 volatile *)dst, (LONG64)val);
    return 1;
#endif
}

int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    /* On Windows, LONG (but not long) is always the same size as int. */
    *ret = (int)InterlockedOr((LONG volatile *)val, 0);
    return 1;
#endif
}

int openssl_init_fork_handlers(void)
{
    return 0;
}

int openssl_get_fork_id(void)
{
    return 0;
}
#endif