/*
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#if defined(_WIN32)
# include <windows.h>
# if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x600
#  define USE_RWLOCK
# endif
#endif
#include <assert.h>

/*
 * VC++ 2008 or earlier x86 compilers do not have an inline implementation
 * of InterlockedOr64 for 32-bit targets and will fail to run on Windows XP
 * 32-bit.
 * https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedor-intrinsic-functions#requirements
 * To work around this problem, we implement a manual locking mechanism
 * only for these older x86 compilers.
 */

#if ((defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER <= 1600) || (defined(__MINGW32__) && !defined(__MINGW64__)))
# define NO_INTERLOCKEDOR64
#endif

#include <openssl/crypto.h>
#include <crypto/cryptlib.h>
#include "internal/common.h"
#include "internal/thread_arch.h"
#include "internal/rcu.h"
#include "rcu_internal.h"

#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && defined(OPENSSL_SYS_WINDOWS)

# ifdef USE_RWLOCK
typedef struct {
    SRWLOCK lock;
    int exclusive;
} CRYPTO_win_rwlock;
# endif

/*
 * This defines a quiescent point (qp).
 * This is the barrier beyond which a writer
 * must wait before freeing data that was
 * atomically updated.
 */
struct rcu_qp {
    volatile uint64_t users;
};

struct thread_qp {
    struct rcu_qp *qp;
    unsigned int depth;
    CRYPTO_RCU_LOCK *lock;
};

# define MAX_QPS 10
/*
 * This is the per-thread tracking data
 * that is assigned to each thread participating
 * in an rcu qp.
 *
 * qp points to the qp that the thread last acquired.
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};

/*
 * This is the internal version of a CRYPTO_RCU_LOCK.
 * It is cast from CRYPTO_RCU_LOCK.
 */
struct rcu_lock_st {
    /* Callbacks to call for next ossl_synchronize_rcu */
    struct rcu_cb_item *cb_items;

    /* The context we are being created against */
    OSSL_LIB_CTX *ctx;

    /* Array of quiescent points for synchronization */
    struct rcu_qp *qp_group;

    /* rcu generation counter for in-order retirement */
    uint32_t id_ctr;

    /* Number of elements in qp_group array */
    uint32_t group_count;

    /* Index of the current qp in the qp_group array */
    uint32_t reader_idx;

    /* value of the next id_ctr value to be retired */
    uint32_t next_to_retire;

    /* index of the next free rcu_qp in the qp_group */
    uint32_t current_alloc_idx;

    /* number of qp's in qp_group array currently being retired */
    uint32_t writers_alloced;

    /* lock protecting write side operations */
    CRYPTO_MUTEX *write_lock;

    /* lock protecting updates to writers_alloced/current_alloc_idx */
    CRYPTO_MUTEX *alloc_lock;

    /* signal to wake threads waiting on alloc_lock */
    CRYPTO_CONDVAR *alloc_signal;

    /* lock to enforce in-order retirement */
    CRYPTO_MUTEX *prior_lock;

    /* signal to wake threads waiting on prior_lock */
    CRYPTO_CONDVAR *prior_signal;

    /* fallback lock for the atomic operations when NO_INTERLOCKEDOR64 is defined */
    CRYPTO_RWLOCK *rw_lock;
};

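/*
 * Allocate the array of quiescent points that readers will be counted
 * against.  The caller checks the returned pointer for allocation failure.
 */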
static struct rcu_qp *allocate_new_qp_group(struct rcu_lock_st *lock,
                                            uint32_t count)
{
    struct rcu_qp *new =
        OPENSSL_zalloc(sizeof(*new) * count);

    lock->group_count = count;
    return new;
}

CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
{
    struct rcu_lock_st *new;

    /*
     * We need a minimum of 2 qps
     */
    if (num_writers < 2)
        num_writers = 2;

    ctx = ossl_lib_ctx_get_concrete(ctx);
    if (ctx == NULL)
        return NULL;

    new = OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return NULL;

    new->ctx = ctx;
    new->rw_lock = CRYPTO_THREAD_lock_new();
    new->write_lock = ossl_crypto_mutex_new();
    new->alloc_signal = ossl_crypto_condvar_new();
    new->prior_signal = ossl_crypto_condvar_new();
    new->alloc_lock = ossl_crypto_mutex_new();
    new->prior_lock = ossl_crypto_mutex_new();
    new->qp_group = allocate_new_qp_group(new, num_writers);
    if (new->qp_group == NULL
        || new->alloc_signal == NULL
        || new->prior_signal == NULL
        || new->write_lock == NULL
        || new->alloc_lock == NULL
        || new->prior_lock == NULL
        || new->rw_lock == NULL) {
        CRYPTO_THREAD_lock_free(new->rw_lock);
        OPENSSL_free(new->qp_group);
        ossl_crypto_condvar_free(&new->alloc_signal);
        ossl_crypto_condvar_free(&new->prior_signal);
        ossl_crypto_mutex_free(&new->alloc_lock);
        ossl_crypto_mutex_free(&new->prior_lock);
        ossl_crypto_mutex_free(&new->write_lock);
        OPENSSL_free(new);
        new = NULL;
    }

    return new;
}

void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
{
    if (lock == NULL)
        return;

    CRYPTO_THREAD_lock_free(lock->rw_lock);
    OPENSSL_free(lock->qp_group);
    ossl_crypto_condvar_free(&lock->alloc_signal);
    ossl_crypto_condvar_free(&lock->prior_signal);
    ossl_crypto_mutex_free(&lock->alloc_lock);
    ossl_crypto_mutex_free(&lock->prior_lock);
    ossl_crypto_mutex_free(&lock->write_lock);
    OPENSSL_free(lock);
}

/* Read side acquisition of the current qp */
static ossl_inline struct rcu_qp *get_hold_current_qp(CRYPTO_RCU_LOCK *lock)
{
    uint32_t qp_idx;
    uint32_t tmp;
    uint64_t tmp64;

    /* get the current qp index */
    for (;;) {
        CRYPTO_atomic_load_int((int *)&lock->reader_idx, (int *)&qp_idx,
                               lock->rw_lock);
        CRYPTO_atomic_add64(&lock->qp_group[qp_idx].users, (uint64_t)1, &tmp64,
                            lock->rw_lock);
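        /*
         * Re-check the reader index: if a writer swapped in a new qp while
         * we were taking our hold, the qp we just counted ourselves into may
         * already be draining, so back the count out and retry on the new
         * current qp.
         */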
        CRYPTO_atomic_load_int((int *)&lock->reader_idx, (int *)&tmp,
                               lock->rw_lock);
        if (qp_idx == tmp)
            break;
        CRYPTO_atomic_add64(&lock->qp_group[qp_idx].users, (uint64_t)-1, &tmp64,
                            lock->rw_lock);
    }

    return &lock->qp_group[qp_idx];
}

static void ossl_rcu_free_local_data(void *arg)
{
    OSSL_LIB_CTX *ctx = arg;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);

    OPENSSL_free(data);
    CRYPTO_THREAD_set_local(lkey, NULL);
}

void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data;
    int i;
    int available_qp = -1;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);

    /*
     * Fetch this thread's rcu tracking data, creating it on first use
     */
    data = CRYPTO_THREAD_get_local(lkey);

    if (data == NULL) {
        data = OPENSSL_zalloc(sizeof(*data));
        OPENSSL_assert(data != NULL);
        CRYPTO_THREAD_set_local(lkey, data);
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
    }

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
            available_qp = i;
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock)
            return;
    }

    /*
     * if we get here, then we don't have a hold on this lock yet
     */
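    /*
     * A thread can hold at most MAX_QPS distinct rcu locks at once;
     * the assertion below fires if that limit is exceeded.
     */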
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;
}

void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    ossl_crypto_mutex_lock(lock->write_lock);
}

void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    ossl_crypto_mutex_unlock(lock->write_lock);
}

void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
    int i;
    LONG64 ret;

    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                CRYPTO_atomic_add64(&data->thread_qps[i].qp->users,
                                    (uint64_t)-1, (uint64_t *)&ret,
                                    lock->rw_lock);
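                /*
                 * ret is a signed LONG64 so that an unbalanced unlock, which
                 * would wrap the unsigned users count below zero, trips the
                 * assertion below.
                 */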
                OPENSSL_assert(ret >= 0);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }
}

/*
 * Write side allocation routine to get the current qp
 * and replace it with a new one
 */
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
{
    uint32_t current_idx;
    uint32_t tmp;

    ossl_crypto_mutex_lock(lock->alloc_lock);
    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        /* we have to wait for one to be free */
        ossl_crypto_condvar_wait(lock->alloc_signal, lock->alloc_lock);

    current_idx = lock->current_alloc_idx;

    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    /* get and insert a new id */
    *curr_id = lock->id_ctr;
    lock->id_ctr++;

    /* point readers at the newly allocated qp */
    tmp = lock->current_alloc_idx;
# if (defined(NO_INTERLOCKEDOR64))
    CRYPTO_THREAD_write_lock(lock->rw_lock);
    lock->reader_idx = tmp;
    CRYPTO_THREAD_unlock(lock->rw_lock);
# else
    InterlockedExchange((LONG volatile *)&lock->reader_idx, tmp);
# endif
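    /*
     * From here on new readers take their hold on the qp we just published;
     * the qp at current_idx, which we return to the caller, can only lose
     * users and will eventually drain to zero in ossl_synchronize_rcu().
     */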

    /* wake up any waiters */
    ossl_crypto_condvar_broadcast(lock->alloc_signal);
    ossl_crypto_mutex_unlock(lock->alloc_lock);
    return &lock->qp_group[current_idx];
}

static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
{
    ossl_crypto_mutex_lock(lock->alloc_lock);
    lock->writers_alloced--;
    ossl_crypto_condvar_broadcast(lock->alloc_signal);
    ossl_crypto_mutex_unlock(lock->alloc_lock);
}

void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    uint64_t count;
    uint32_t curr_id;
    struct rcu_cb_item *cb_items, *tmpcb;

    /* before we do anything else, let's grab the cb list */
    ossl_crypto_mutex_lock(lock->write_lock);
    cb_items = lock->cb_items;
    lock->cb_items = NULL;
    ossl_crypto_mutex_unlock(lock->write_lock);

    qp = update_qp(lock, &curr_id);

    /* retire in order */
    ossl_crypto_mutex_lock(lock->prior_lock);
    while (lock->next_to_retire != curr_id)
        ossl_crypto_condvar_wait(lock->prior_signal, lock->prior_lock);

    /* wait for the reader count to reach zero */
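    /*
     * New read-side holds land on the qp that update_qp() published, so the
     * count on this retired qp can only fall, and the loop terminates.
     */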
    do {
        CRYPTO_atomic_load(&qp->users, &count, lock->rw_lock);
    } while (count != (uint64_t)0);

    lock->next_to_retire++;
    ossl_crypto_condvar_broadcast(lock->prior_signal);
    ossl_crypto_mutex_unlock(lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }

    /* and we're done */
    return;
}

/*
 * Note, must be called under the protection of ossl_rcu_write_lock
 */
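/*
 * Illustrative write-side usage (a sketch only; shared, newval and
 * free_old_cb are hypothetical caller-side names, not part of this file):
 *
 *     ossl_rcu_write_lock(lock);
 *     old = ossl_rcu_uptr_deref(&shared);
 *     ossl_rcu_assign_uptr(&shared, &newval);
 *     ossl_rcu_call(lock, free_old_cb, old);
 *     ossl_rcu_write_unlock(lock);
 *     ossl_synchronize_rcu(lock);    runs free_old_cb(old) once readers drain
 *
 * ossl_synchronize_rcu() takes write_lock itself, so it is called after the
 * write lock has been released.
 */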
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
{
    struct rcu_cb_item *new;

    new = OPENSSL_zalloc(sizeof(struct rcu_cb_item));
    if (new == NULL)
        return 0;
    new->data = data;
    new->fn = cb;

    new->next = lock->cb_items;
    lock->cb_items = new;

    return 1;
}

void *ossl_rcu_uptr_deref(void **p)
{
    return (void *)*p;
}

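/*
 * InterlockedExchangePointer performs an atomic pointer store with a full
 * memory barrier, so the new object is fully visible before the pointer to
 * it can be observed by readers.
 */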
void ossl_rcu_assign_uptr(void **p, void **v)
{
    InterlockedExchangePointer((void * volatile *)p, (void *)*v);
}

CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
    CRYPTO_RWLOCK *lock;
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock;

    if ((lock = OPENSSL_zalloc(sizeof(CRYPTO_win_rwlock))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;
    rwlock = lock;
    InitializeSRWLock(&rwlock->lock);
# else

    if ((lock = OPENSSL_zalloc(sizeof(CRITICAL_SECTION))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

#  if !defined(_WIN32_WCE)
    /* 0x400 is the spin count value suggested in the documentation */
    if (!InitializeCriticalSectionAndSpinCount(lock, 0x400)) {
        OPENSSL_free(lock);
        return NULL;
    }
#  else
    InitializeCriticalSection(lock);
#  endif
# endif

    return lock;
}

__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    AcquireSRWLockShared(&rwlock->lock);
# else
    EnterCriticalSection(lock);
# endif
    return 1;
}

__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    AcquireSRWLockExclusive(&rwlock->lock);
    rwlock->exclusive = 1;
# else
    EnterCriticalSection(lock);
# endif
    return 1;
}
int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

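    /*
     * SRW locks do not record whether they were acquired shared or
     * exclusive, so CRYPTO_THREAD_write_lock() notes it in rwlock->exclusive
     * and we pick the matching release call here.
     */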
    if (rwlock->exclusive) {
        rwlock->exclusive = 0;
        ReleaseSRWLockExclusive(&rwlock->lock);
    } else {
        ReleaseSRWLockShared(&rwlock->lock);
    }
# else
    LeaveCriticalSection(lock);
# endif
    return 1;
}

void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;

# ifndef USE_RWLOCK
    DeleteCriticalSection(lock);
# endif
    OPENSSL_free(lock);

    return;
}

# define ONCE_UNINITED 0
# define ONCE_ININIT 1
# define ONCE_DONE 2

/*
 * We don't use InitOnceExecuteOnce because that isn't available on WinXP,
 * which we still have to support.
 */
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    LONG volatile *lock = (LONG *)once;
    LONG result;

    if (*lock == ONCE_DONE)
        return 1;

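    /*
     * Race to move the state from ONCE_UNINITED to ONCE_ININIT: the winner
     * runs init() and then marks the state ONCE_DONE; any loser that sees
     * ONCE_ININIT spins on the compare-exchange until the winner finishes.
     */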
    do {
        result = InterlockedCompareExchange(lock, ONCE_ININIT, ONCE_UNINITED);
        if (result == ONCE_UNINITED) {
            init();
            *lock = ONCE_DONE;
            return 1;
        }
    } while (result == ONCE_ININIT);

    return (*lock == ONCE_DONE);
}

int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
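    /*
     * Note: cleanup is ignored here; TlsAlloc() has no destructor support,
     * so per-thread cleanup has to be driven elsewhere (for example from
     * OPENSSL_thread_stop()).
     */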
    *key = TlsAlloc();
    if (*key == TLS_OUT_OF_INDEXES)
        return 0;

    return 1;
}

void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    DWORD last_error;
    void *ret;

    /*
     * TlsGetValue clears the last error even on success, so that callers may
     * distinguish it successfully returning NULL or failing. It is documented
     * to never fail if the argument is a valid index from TlsAlloc, so we do
     * not need to handle this.
     *
     * However, this error-mangling behavior interferes with the caller's use
     * of GetLastError. In particular SSL_get_error queries the error queue to
     * determine whether the caller should look at the OS's errors. To avoid
     * destroying state, save and restore the Windows error.
     *
     * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686812(v=vs.85).aspx
     */
    last_error = GetLastError();
    ret = TlsGetValue(*key);
    SetLastError(last_error);
    return ret;
}

int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
    if (TlsSetValue(*key, val) == 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
    if (TlsFree(*key) == 0)
        return 0;

    return 1;
}

CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return GetCurrentThreadId();
}

int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
    return (a == b);
}

int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
# if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val += amount;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
# else
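    /* InterlockedExchangeAdd returns the old value, so add amount back in */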
    *ret = (int)InterlockedExchangeAdd((LONG volatile *)val, (LONG)amount)
        + amount;
    return 1;
# endif
}

int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
                        CRYPTO_RWLOCK *lock)
{
# if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val += op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
# else
    *ret = (uint64_t)InterlockedAdd64((LONG64 volatile *)val, (LONG64)op);
    return 1;
# endif
}

int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
                      CRYPTO_RWLOCK *lock)
{
# if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val &= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
# else
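    /*
     * InterlockedAnd64 (and InterlockedOr64 below) return the value held
     * before the operation, so combine it with op again to report the new
     * value.
     */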
    *ret = (uint64_t)InterlockedAnd64((LONG64 volatile *)val, (LONG64)op) & op;
    return 1;
# endif
}

int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
# if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
# else
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, (LONG64)op) | op;
    return 1;
# endif
}

int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
# if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
# else
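    /* an atomic OR with 0 reads the current value without modifying it */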
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, 0);
    return 1;
# endif
}

int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
{
# if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *dst = val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
# else
    InterlockedExchange64((LONG64 volatile *)dst, (LONG64)val);
    return 1;
# endif
}

int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
# if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
# else
    /* On Windows, LONG (but not long) is always the same size as int. */
    *ret = (int)InterlockedOr((LONG volatile *)val, 0);
    return 1;
# endif
}

int openssl_init_fork_handlers(void)
{
    return 0;
}

int openssl_get_fork_id(void)
{
    return 0;
}
#endif