/*-
 * Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/atomic.h>

/*
 * Emulated 64-bit atomic operations for platforms that lack native 64-bit
 * atomics: every operation takes a single global mutex, so the shims are
 * correct but serialize all callers.  None of this is compiled on targets
 * that already provide (or emulate) 64-bit atomics.
 */
#if !defined(__LP64__) && !defined(__mips_n32) && \
    !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64) && \
    !defined(HAS_EMULATED_ATOMIC64)

#ifdef _KERNEL
#include <sys/kernel.h>

/* In the kernel, the global lock is an mtx(9) mutex initialized at boot. */
struct mtx atomic_mtx;
MTX_SYSINIT(atomic, &atomic_mtx, "atomic", MTX_DEF);
#else
#include <pthread.h>

/* In userland, map the mtx(9) calls onto a pthread mutex. */
#define	mtx_lock(lock)		pthread_mutex_lock(lock)
#define	mtx_unlock(lock)	pthread_mutex_unlock(lock)

static pthread_mutex_t atomic_mtx;

/* Initialize the mutex before main() runs. */
static __attribute__((constructor)) void
atomic_init(void)
{
	pthread_mutex_init(&atomic_mtx, NULL);
}
#endif

/* Atomically add delta (which may be negative) to *target. */
void
atomic_add_64(volatile uint64_t *target, int64_t delta)
{

	mtx_lock(&atomic_mtx);
	*target += delta;
	mtx_unlock(&atomic_mtx);
}

/* Atomically decrement *target by one. */
void
atomic_dec_64(volatile uint64_t *target)
{

	mtx_lock(&atomic_mtx);
	*target -= 1;
	mtx_unlock(&atomic_mtx);
}

/* Atomically store value in *a and return the previous contents. */
uint64_t
atomic_swap_64(volatile uint64_t *a, uint64_t value)
{
	uint64_t ret;

	mtx_lock(&atomic_mtx);
	ret = *a;
	*a = value;
	mtx_unlock(&atomic_mtx);
	return (ret);
}

/* Atomically read *a. */
uint64_t
atomic_load_64(volatile uint64_t *a)
{
	uint64_t ret;

	mtx_lock(&atomic_mtx);
	ret = *a;
	mtx_unlock(&atomic_mtx);
	return (ret);
}

/* Atomically add delta to *target and return the new value. */
uint64_t
atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
{
	uint64_t newval;

	mtx_lock(&atomic_mtx);
	newval = (*target += delta);
	mtx_unlock(&atomic_mtx);
	return (newval);
}

/*
 * Atomic compare-and-swap: if *target equals cmp, store newval.  The old
 * value is returned either way, so callers can tell whether the swap
 * happened by comparing the result against cmp.
 */
uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
	uint64_t oldval;

	mtx_lock(&atomic_mtx);
	oldval = *target;
	if (oldval == cmp)
		*target = newval;
	mtx_unlock(&atomic_mtx);
	return (oldval);
}
#endif
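
/*
 * A minimal userland smoke test for the shims above; a hedged sketch, not
 * part of the original file.  The ATOMIC64_SELFTEST guard and the source
 * file name below are hypothetical, and the test assumes a 32-bit target
 * on which the shims are actually compiled in.  Build with something like:
 *
 *	cc -DATOMIC64_SELFTEST -o atomic_test atomic.c -lpthread
 */
#if defined(ATOMIC64_SELFTEST) && !defined(_KERNEL)
#include <assert.h>
#include <stdio.h>

int
main(void)
{
	volatile uint64_t v = 0;

	atomic_add_64(&v, 5);				/* v == 5 */
	assert(atomic_add_64_nv(&v, 3) == 8);		/* v == 8 */
	assert(atomic_swap_64(&v, 100) == 8);		/* v == 100 */
	assert(atomic_cas_64(&v, 100, 1) == 100);	/* match: v == 1 */
	assert(atomic_cas_64(&v, 100, 2) == 1);		/* no match: v == 1 */
	atomic_dec_64(&v);				/* v == 0 */
	assert(atomic_load_64(&v) == 0);
	printf("atomic_64 shims behaved as expected\n");
	return (0);
}
#endif	/* ATOMIC64_SELFTEST && !_KERNEL */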