19aafc7c0SBrandon Bergren /*- 29aafc7c0SBrandon Bergren * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 39aafc7c0SBrandon Bergren * 49aafc7c0SBrandon Bergren * Copyright (c) 2019 Justin Hibbits 59aafc7c0SBrandon Bergren * 69aafc7c0SBrandon Bergren * Redistribution and use in source and binary forms, with or without 79aafc7c0SBrandon Bergren * modification, are permitted provided that the following conditions 89aafc7c0SBrandon Bergren * are met: 99aafc7c0SBrandon Bergren * 1. Redistributions of source code must retain the above copyright 109aafc7c0SBrandon Bergren * notice, this list of conditions and the following disclaimer. 119aafc7c0SBrandon Bergren * 2. Redistributions in binary form must reproduce the above copyright 129aafc7c0SBrandon Bergren * notice, this list of conditions and the following disclaimer in the 139aafc7c0SBrandon Bergren * documentation and/or other materials provided with the distribution. 149aafc7c0SBrandon Bergren * 159aafc7c0SBrandon Bergren * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 169aafc7c0SBrandon Bergren * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 179aafc7c0SBrandon Bergren * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 189aafc7c0SBrandon Bergren * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/atomic.h>
#include <machine/param.h>

#include <vm/vm.h>
#include <vm/pmap.h>

/*
 * Identifiers for the emulated 64-bit atomic operations.
 *
 * NOTE(review): none of these constants are referenced anywhere in this
 * file; presumably they are consumed by out-of-line callers or trap
 * handlers elsewhere — confirm before removing.
 */
enum {
	ATOMIC64_ADD,
	ATOMIC64_CLEAR,
	ATOMIC64_CMPSET,
	ATOMIC64_FCMPSET,
	ATOMIC64_FETCHADD,
	ATOMIC64_LOAD,
	ATOMIC64_SET,
	ATOMIC64_SUBTRACT,
	ATOMIC64_STORE,
	ATOMIC64_SWAP
};

#ifdef _KERNEL
#ifdef SMP

/* One pool slot per possible CPU. */
#define	A64_POOL_SIZE	MAXCPU
/* Estimated size of a cacheline */
#define	CACHE_ALIGN	CACHE_LINE_SIZE

/*
 * Pool of mutexes guarding emulated 64-bit atomics on SMP.  Initialized by
 * the SYSINIT below at SI_SUB_LOCK.
 */
static struct mtx a64_mtx_pool[A64_POOL_SIZE];

/*
 * Map the target address to a pool slot: hash the physical address
 * (pmap_kextract) at cacheline granularity onto the mutex pool, so all
 * accesses to the same 64-bit object always take the same mutex.
 */
#define GET_MUTEX(p) \
    (&a64_mtx_pool[(pmap_kextract((vm_offset_t)p) / CACHE_ALIGN) % (A64_POOL_SIZE)])

/*
 * LOCK_A64()/UNLOCK_A64() must be used as a matched pair within one
 * function whose target pointer parameter is named 'p'.  LOCK_A64()
 * expands to a declaration of '_amtx' plus a conditional lock, so it
 * must appear where a declaration is legal.  The lock is skipped until
 * smp_started: before that point only one CPU is running (and the
 * mutex pool may not be initialized yet).
 */
#define LOCK_A64()	\
    struct mtx *_amtx = GET_MUTEX(p); \
    if (smp_started) mtx_lock(_amtx)

#define UNLOCK_A64()	if (smp_started) mtx_unlock(_amtx)

#else /* !SMP */

/*
 * UP variant: disabling interrupts is sufficient for atomicity.  Note
 * LOCK_A64() opens a brace that UNLOCK_A64() closes, so the pair must be
 * used in the same scope, in order.
 */
#define	LOCK_A64()	{ register_t s = intr_disable()
#define	UNLOCK_A64()	intr_restore(s); }

#endif /* SMP */

/*
 * Generators for the emulated atomic entry points.  ATOMIC64_EMU_UN
 * produces a unary op (target only); ATOMIC64_EMU_BIN a binary op
 * (target plus a 64-bit operand 'v').  'block' executes under the
 * lock; 'tmp' is scratch for blocks that need it (marked __unused for
 * those that do not).  The trailing 'struct hack' consumes the
 * semicolon written at the expansion site.
 */
#define	ATOMIC64_EMU_UN(op, rt, block, ret) \
    rt \
    atomic_##op##_64(volatile u_int64_t *p) {		\
	u_int64_t tmp __unused;				\
	LOCK_A64();					\
	block;						\
	UNLOCK_A64();					\
	ret; } struct hack

#define	ATOMIC64_EMU_BIN(op, rt, block, ret) \
    rt \
    atomic_##op##_64(volatile u_int64_t *p, u_int64_t v) {	\
	u_int64_t tmp __unused;					\
	LOCK_A64();						\
	block;							\
	UNLOCK_A64();						\
	ret; } struct hack
989aafc7c0SBrandon Bergren 999aafc7c0SBrandon Bergren ATOMIC64_EMU_BIN(add, void, (*p = *p + v), return); 1009aafc7c0SBrandon Bergren ATOMIC64_EMU_BIN(clear, void, *p &= ~v, return); 1019aafc7c0SBrandon Bergren ATOMIC64_EMU_BIN(fetchadd, u_int64_t, (*p = *p + v, v = *p - v), return (v)); 1029aafc7c0SBrandon Bergren ATOMIC64_EMU_UN(load, u_int64_t, (tmp = *p), return (tmp)); 1039aafc7c0SBrandon Bergren ATOMIC64_EMU_BIN(set, void, *p |= v, return); 1049aafc7c0SBrandon Bergren ATOMIC64_EMU_BIN(subtract, void, (*p = *p - v), return); 1059aafc7c0SBrandon Bergren ATOMIC64_EMU_BIN(store, void, *p = v, return); 1069aafc7c0SBrandon Bergren ATOMIC64_EMU_BIN(swap, u_int64_t, tmp = *p; *p = v; v = tmp, return(v)); 1079aafc7c0SBrandon Bergren 1089aafc7c0SBrandon Bergren int atomic_cmpset_64(volatile u_int64_t *p, u_int64_t old, u_int64_t new) 1099aafc7c0SBrandon Bergren { 1109aafc7c0SBrandon Bergren u_int64_t tmp; 1119aafc7c0SBrandon Bergren 1129aafc7c0SBrandon Bergren LOCK_A64(); 1139aafc7c0SBrandon Bergren tmp = *p; 1149aafc7c0SBrandon Bergren if (tmp == old) 1159aafc7c0SBrandon Bergren *p = new; 1169aafc7c0SBrandon Bergren UNLOCK_A64(); 1179aafc7c0SBrandon Bergren 1189aafc7c0SBrandon Bergren return (tmp == old); 1199aafc7c0SBrandon Bergren } 1209aafc7c0SBrandon Bergren 1219aafc7c0SBrandon Bergren int atomic_fcmpset_64(volatile u_int64_t *p, u_int64_t *old, u_int64_t new) 1229aafc7c0SBrandon Bergren { 1239aafc7c0SBrandon Bergren u_int64_t tmp, tmp_old; 1249aafc7c0SBrandon Bergren 1259aafc7c0SBrandon Bergren LOCK_A64(); 1269aafc7c0SBrandon Bergren tmp = *p; 1279aafc7c0SBrandon Bergren tmp_old = *old; 1289aafc7c0SBrandon Bergren if (tmp == tmp_old) 1299aafc7c0SBrandon Bergren *p = new; 1309aafc7c0SBrandon Bergren else 1319aafc7c0SBrandon Bergren *old = tmp; 1329aafc7c0SBrandon Bergren UNLOCK_A64(); 1339aafc7c0SBrandon Bergren 1349aafc7c0SBrandon Bergren return (tmp == tmp_old); 1359aafc7c0SBrandon Bergren } 1369aafc7c0SBrandon Bergren 137*3a22f09cSKyle Evans #ifdef SMP 
1389aafc7c0SBrandon Bergren static void 1399aafc7c0SBrandon Bergren atomic64_mtxinit(void *x __unused) 1409aafc7c0SBrandon Bergren { 1419aafc7c0SBrandon Bergren int i; 1429aafc7c0SBrandon Bergren 1439aafc7c0SBrandon Bergren for (i = 0; i < A64_POOL_SIZE; i++) 1449aafc7c0SBrandon Bergren mtx_init(&a64_mtx_pool[i], "atomic64 mutex", NULL, MTX_DEF); 1459aafc7c0SBrandon Bergren } 1469aafc7c0SBrandon Bergren 1479aafc7c0SBrandon Bergren SYSINIT(atomic64_mtxinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, atomic64_mtxinit, NULL); 148*3a22f09cSKyle Evans #endif /* SMP */ 1499aafc7c0SBrandon Bergren 1509aafc7c0SBrandon Bergren #endif /* _KERNEL */ 151