/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Justin Hibbits
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Emulated 64-bit atomic operations for platforms that lack native 64-bit
 * atomics.  On SMP kernels, atomicity is provided by a small pool of
 * mutexes indexed by the physical address of the target word; on UP
 * kernels it suffices to disable interrupts around the operation.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/atomic.h>
#include <machine/param.h>

#include <vm/vm.h>
#include <vm/pmap.h>

enum {
	ATOMIC64_ADD,
	ATOMIC64_CLEAR,
	ATOMIC64_CMPSET,
	ATOMIC64_FCMPSET,
	ATOMIC64_FETCHADD,
	ATOMIC64_LOAD,
	ATOMIC64_SET,
	ATOMIC64_SUBTRACT,
	ATOMIC64_STORE,
	ATOMIC64_SWAP
};

#ifdef _KERNEL
#ifdef SMP

#define	A64_POOL_SIZE	MAXCPU
/* Estimated size of a cacheline */
#define	CACHE_ALIGN	CACHE_LINE_SIZE
static struct mtx a64_mtx_pool[A64_POOL_SIZE];

/*
 * Hash the target's physical address, at cacheline granularity, into the
 * mutex pool.  Every operation on a given 64-bit word thus takes the same
 * mutex.
 */
#define	GET_MUTEX(p) \
	(&a64_mtx_pool[(pmap_kextract((vm_offset_t)p) / CACHE_ALIGN) % (A64_POOL_SIZE)])

#define	LOCK_A64()			\
	struct mtx *_amtx = GET_MUTEX(p);	\
	if (smp_started) mtx_lock(_amtx)

#define	UNLOCK_A64()	if (smp_started) mtx_unlock(_amtx)

#else	/* !SMP */

#define	LOCK_A64()	{ register_t s = intr_disable()
#define	UNLOCK_A64()	intr_restore(s); }

#endif	/* SMP */

#define	ATOMIC64_EMU_UN(op, rt, block, ret)		\
	rt						\
	atomic_##op##_64(volatile uint64_t *p) {	\
		uint64_t tmp __unused;			\
		LOCK_A64();				\
		block;					\
		UNLOCK_A64();				\
		ret; } struct hack

#define	ATOMIC64_EMU_BIN(op, rt, block, ret)			\
	rt							\
	atomic_##op##_64(volatile uint64_t *p, uint64_t v) {	\
		uint64_t tmp __unused;				\
		LOCK_A64();					\
		block;						\
		UNLOCK_A64();					\
		ret; } struct hack

ATOMIC64_EMU_BIN(add, void, (*p = *p + v), return);
ATOMIC64_EMU_BIN(clear, void, *p &= ~v, return);
ATOMIC64_EMU_BIN(fetchadd, uint64_t, (*p = *p + v, v = *p - v), return (v));
ATOMIC64_EMU_UN(load, uint64_t, (tmp = *p), return (tmp));
ATOMIC64_EMU_BIN(set, void, *p |= v, return);
ATOMIC64_EMU_BIN(subtract, void, (*p = *p - v), return);
ATOMIC64_EMU_BIN(store, void, *p = v, return);
ATOMIC64_EMU_BIN(swap, uint64_t, tmp = *p; *p = v; v = tmp, return (v));

int
atomic_cmpset_64(volatile uint64_t *p, uint64_t old, uint64_t new)
{
	uint64_t tmp;

	LOCK_A64();
	tmp = *p;
	if (tmp == old)
		*p = new;
	UNLOCK_A64();

	return (tmp == old);
}

int
atomic_fcmpset_64(volatile uint64_t *p, uint64_t *old, uint64_t new)
{
	uint64_t tmp, tmp_old;

	LOCK_A64();
	tmp = *p;
	tmp_old = *old;
	if (tmp == tmp_old)
		*p = new;
	else
		*old = tmp;	/* On failure, report the observed value. */
	UNLOCK_A64();

	return (tmp == tmp_old);
}

#ifdef SMP
static void
atomic64_mtxinit(void *x __unused)
{
	int i;

	for (i = 0; i < A64_POOL_SIZE; i++)
		mtx_init(&a64_mtx_pool[i], "atomic64 mutex", NULL, MTX_DEF);
}

SYSINIT(atomic64_mtxinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, atomic64_mtxinit, NULL);
#endif	/* SMP */

#endif	/* _KERNEL */
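/*
 * Usage sketch (illustrative only; the counter and function names below
 * are hypothetical and not part of this file).  With this emulation in
 * place, callers use the ordinary atomic(9) 64-bit interface; as with
 * the native primitive, atomic_fetchadd_64() returns the value the
 * counter held before the addition:
 *
 *	static uint64_t example_counter;
 *
 *	static uint64_t
 *	example_bump(void)
 *	{
 *		return (atomic_fetchadd_64(&example_counter, 1));
 *	}
 */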