xref: /freebsd/sys/kern/subr_atomic64.c (revision 9aafc7c0523456c8b5ee8919c97f75277cf4d70b)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019 Justin Hibbits
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/atomic.h>
#include <machine/param.h>

#include <vm/vm.h>
#include <vm/pmap.h>
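
/*
 * Software emulation of 64-bit atomic operations, for platforms that lack
 * native 64-bit atomics.  Every operation is serialized by a mutex picked
 * from a small pool, indexed by the target's physical address at
 * cache-line granularity; the pool is initialized by the SYSINIT at the
 * bottom of the file.
 */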

/* Identifiers for the operations being emulated. */
enum {
	ATOMIC64_ADD,
	ATOMIC64_CLEAR,
	ATOMIC64_CMPSET,
	ATOMIC64_FCMPSET,
	ATOMIC64_FETCHADD,
	ATOMIC64_LOAD,
	ATOMIC64_SET,
	ATOMIC64_SUBTRACT,
	ATOMIC64_STORE,
	ATOMIC64_SWAP
};

#ifdef _KERNEL
#define	A64_POOL_SIZE	MAXCPU
/* Estimated size of a cacheline */
#define	CACHE_ALIGN	CACHE_LINE_SIZE

/* Hash the variable's physical address to a mutex in the pool. */
#define GET_MUTEX(p) \
    (&a64_mtx_pool[(pmap_kextract((vm_offset_t)p) / CACHE_ALIGN) % (A64_POOL_SIZE)])
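
/*
 * Worked example (all values hypothetical): with CACHE_LINE_SIZE 64 and
 * MAXCPU 4, a variable at physical address 0x1040 selects slot
 * (0x1040 / 64) % 4 = 65 % 4 = 1, so every 64-bit atomic on that cache
 * line serializes on a64_mtx_pool[1].
 */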

/* The pool lock is only taken once SMP is up. */
#define LOCK_A64()			\
    struct mtx *_amtx = GET_MUTEX(p);	\
    if (smp_started) mtx_lock(_amtx)

#define UNLOCK_A64()	if (smp_started) mtx_unlock(_amtx)

/*
 * Emit an emulated unary 64-bit atomic op: "block" executes with the pool
 * mutex held.  The trailing "struct hack" lets each invocation end with a
 * semicolon.
 */
#define ATOMIC64_EMU_UN(op, rt, block, ret) \
    rt \
    atomic_##op##_64(volatile u_int64_t *p) {			\
	u_int64_t tmp __unused;					\
	LOCK_A64();						\
	block;							\
	UNLOCK_A64();						\
	ret; } struct hack

/* As above, but for binary ops that take a second operand "v". */
#define	ATOMIC64_EMU_BIN(op, rt, block, ret) \
    rt \
    atomic_##op##_64(volatile u_int64_t *p, u_int64_t v) {	\
	u_int64_t tmp __unused;					\
	LOCK_A64();						\
	block;							\
	UNLOCK_A64();						\
	ret; } struct hack

static struct mtx a64_mtx_pool[A64_POOL_SIZE];

ATOMIC64_EMU_BIN(add, void, (*p = *p + v), return);
ATOMIC64_EMU_BIN(clear, void, *p &= ~v, return);
ATOMIC64_EMU_BIN(fetchadd, u_int64_t, (*p = *p + v, v = *p - v), return (v));
ATOMIC64_EMU_UN(load, u_int64_t, (tmp = *p), return (tmp));
ATOMIC64_EMU_BIN(set, void, *p |= v, return);
ATOMIC64_EMU_BIN(subtract, void, (*p = *p - v), return);
ATOMIC64_EMU_BIN(store, void, *p = v, return);
ATOMIC64_EMU_BIN(swap, u_int64_t, tmp = *p; *p = v; v = tmp, return (v));
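
/*
 * For reference, the "add" invocation above expands to approximately:
 *
 *	void
 *	atomic_add_64(volatile u_int64_t *p, u_int64_t v)
 *	{
 *		u_int64_t tmp __unused;
 *		struct mtx *_amtx = GET_MUTEX(p);
 *
 *		if (smp_started)
 *			mtx_lock(_amtx);
 *		*p = *p + v;
 *		if (smp_started)
 *			mtx_unlock(_amtx);
 *		return;
 *	}
 */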

/*
 * Atomically replace *p with "new" if it equals "old"; return non-zero on
 * success.
 */
int
atomic_cmpset_64(volatile u_int64_t *p, u_int64_t old, u_int64_t new)
{
	u_int64_t tmp;

	LOCK_A64();
	tmp = *p;
	if (tmp == old)
		*p = new;
	UNLOCK_A64();

	return (tmp == old);
}

/*
 * As atomic_cmpset_64(), but on failure copy the value actually found in
 * *p back into *old.
 */
int
atomic_fcmpset_64(volatile u_int64_t *p, u_int64_t *old, u_int64_t new)
{
	u_int64_t tmp, tmp_old;

	LOCK_A64();
	tmp = *p;
	tmp_old = *old;
	if (tmp == tmp_old)
		*p = new;
	else
		*old = tmp;
	UNLOCK_A64();

	return (tmp == tmp_old);
}
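
/*
 * A minimal usage sketch (not part of this file; "counter" is a
 * hypothetical volatile u_int64_t).  On failure, atomic_fcmpset_64()
 * writes the observed value back into "old", so the retry loop does not
 * need to re-read the target explicitly:
 *
 *	u_int64_t old;
 *
 *	old = counter;
 *	while (!atomic_fcmpset_64(&counter, &old, old + 1))
 *		continue;
 */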

static void
atomic64_mtxinit(void *x __unused)
{
	int i;

	for (i = 0; i < A64_POOL_SIZE; i++)
		mtx_init(&a64_mtx_pool[i], "atomic64 mutex", NULL, MTX_DEF);
}

/* Populate the mutex pool during boot, at lock initialization time. */
SYSINIT(atomic64_mtxinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, atomic64_mtxinit, NULL);

#endif	/* _KERNEL */