/*-
 * Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/atomic.h>

/*
 * Fallback 64-bit atomic operations for 32-bit platforms that lack native
 * 64-bit atomic instructions: every operation is serialized with a single
 * global mutex.
 */
#if !defined(__LP64__) && !defined(__mips_n32) && \
    !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64)

#ifdef _KERNEL
#include <sys/kernel.h>

struct mtx atomic_mtx;
MTX_SYSINIT(atomic, &atomic_mtx, "atomic", MTX_DEF);
#else
#include <pthread.h>

/*
 * In userland, emulate the kernel mutex API with a pthread mutex that is
 * initialized by a constructor before main() runs.
 */
#define	mtx_lock(lock)		pthread_mutex_lock(lock)
#define	mtx_unlock(lock)	pthread_mutex_unlock(lock)

static pthread_mutex_t atomic_mtx;

static __attribute__((constructor)) void
atomic_init(void)
{

	pthread_mutex_init(&atomic_mtx, NULL);
}
#endif

void
atomic_add_64(volatile uint64_t *target, int64_t delta)
{

	mtx_lock(&atomic_mtx);
	*target += delta;
	mtx_unlock(&atomic_mtx);
}

void
atomic_dec_64(volatile uint64_t *target)
{

	mtx_lock(&atomic_mtx);
	*target -= 1;
	mtx_unlock(&atomic_mtx);
}

uint64_t
atomic_swap_64(volatile uint64_t *a, uint64_t value)
{
	uint64_t ret;

	mtx_lock(&atomic_mtx);
	ret = *a;
	*a = value;
	mtx_unlock(&atomic_mtx);
	return (ret);
}

uint64_t
atomic_load_64(volatile uint64_t *a)
{
	uint64_t ret;

	mtx_lock(&atomic_mtx);
	ret = *a;
	mtx_unlock(&atomic_mtx);
	return (ret);
}

uint64_t
atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
{
	uint64_t newval;

	mtx_lock(&atomic_mtx);
	newval = (*target += delta);
	mtx_unlock(&atomic_mtx);
	return (newval);
}

/*
 * Compare-and-swap: store newval only if *target equals cmp; the previous
 * value is returned either way so callers can detect whether the swap
 * took effect.
 */
uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
	uint64_t oldval;

	mtx_lock(&atomic_mtx);
	oldval = *target;
	if (oldval == cmp)
		*target = newval;
	mtx_unlock(&atomic_mtx);
	return (oldval);
}
#endif