/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Justin Hibbits
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/atomic.h>
#include <machine/param.h>

#include <vm/vm.h>
#include <vm/pmap.h>

enum {
    ATOMIC64_ADD,
    ATOMIC64_CLEAR,
    ATOMIC64_CMPSET,
    ATOMIC64_FCMPSET,
    ATOMIC64_FETCHADD,
    ATOMIC64_LOAD,
    ATOMIC64_SET,
    ATOMIC64_SUBTRACT,
    ATOMIC64_STORE,
    ATOMIC64_SWAP
};

#ifdef _KERNEL
#ifdef SMP

#define	A64_POOL_SIZE	MAXCPU
/* Estimated size of a cacheline */
#define	CACHE_ALIGN	CACHE_LINE_SIZE
static struct mtx a64_mtx_pool[A64_POOL_SIZE];

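/*
 * Pick a pool mutex for address p.  pmap_kextract() yields the variable's
 * physical address; dividing by the cache line size and taking the result
 * modulo the pool size hashes distinct 64-bit variables onto different
 * mutexes, so unrelated emulated atomics rarely contend on the same lock.
 */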
#define GET_MUTEX(p) \
    (&a64_mtx_pool[(pmap_kextract((vm_offset_t)p) / CACHE_ALIGN) % (A64_POOL_SIZE)])

#define LOCK_A64()			\
    struct mtx *_amtx = GET_MUTEX(p);	\
    if (smp_started) mtx_lock(_amtx)

#define UNLOCK_A64()	if (smp_started) mtx_unlock(_amtx)

#else	/* !SMP */

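/*
 * On a uniprocessor kernel the only way another context can interleave
 * with the emulated operation is via an interrupt, so briefly disabling
 * interrupts around the critical section is sufficient for atomicity.
 */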
#define	LOCK_A64()	{ register_t s = intr_disable()
#define	UNLOCK_A64()	intr_restore(s); }

#endif	/* SMP */

#define ATOMIC64_EMU_UN(op, rt, block, ret) \
    rt \
    atomic_##op##_64(volatile uint64_t *p) {			\
	uint64_t tmp __unused;					\
	LOCK_A64();						\
	block;							\
	UNLOCK_A64();						\
	ret; } struct hack

#define	ATOMIC64_EMU_BIN(op, rt, block, ret) \
    rt \
    atomic_##op##_64(volatile uint64_t *p, uint64_t v) {	\
	uint64_t tmp __unused;					\
	LOCK_A64();						\
	block;							\
	UNLOCK_A64();						\
	ret; } struct hack
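
/*
 * For illustration only: ATOMIC64_EMU_BIN(add, void, (*p = *p + v), return)
 * expands to roughly
 *
 *	void
 *	atomic_add_64(volatile uint64_t *p, uint64_t v)
 *	{
 *		uint64_t tmp __unused;
 *		LOCK_A64();
 *		(*p = *p + v);
 *		UNLOCK_A64();
 *		return;
 *	}
 *
 * The trailing "struct hack" forward declaration exists only to consume
 * the semicolon after each macro invocation below.
 */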

ATOMIC64_EMU_BIN(add, void, (*p = *p + v), return);
ATOMIC64_EMU_BIN(clear, void, *p &= ~v, return);
ATOMIC64_EMU_BIN(fetchadd, uint64_t, (*p = *p + v, v = *p - v), return (v));
ATOMIC64_EMU_UN(load, uint64_t, (tmp = *p), return (tmp));
ATOMIC64_EMU_BIN(set, void, *p |= v, return);
ATOMIC64_EMU_BIN(subtract, void, (*p = *p - v), return);
ATOMIC64_EMU_BIN(store, void, *p = v, return);
ATOMIC64_EMU_BIN(swap, uint64_t, tmp = *p; *p = v; v = tmp, return (v));

int
atomic_cmpset_64(volatile uint64_t *p, uint64_t old, uint64_t new)
{
	uint64_t tmp;

	LOCK_A64();
	tmp = *p;
	if (tmp == old)
		*p = new;
	UNLOCK_A64();

	return (tmp == old);
}

int
atomic_fcmpset_64(volatile uint64_t *p, uint64_t *old, uint64_t new)
{
	uint64_t tmp, tmp_old;

	LOCK_A64();
	tmp = *p;
	tmp_old = *old;
	if (tmp == tmp_old)
		*p = new;
	else
		*old = tmp;
	UNLOCK_A64();

	return (tmp == tmp_old);
}
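
/*
 * Illustrative caller pattern (a sketch, not part of this file; "counter"
 * is a hypothetical uint64_t): on failure atomic_fcmpset_64() writes the
 * observed value back through "old", so a retry loop needs no explicit
 * reload:
 *
 *	uint64_t exp, new;
 *
 *	exp = atomic_load_64(&counter);
 *	do {
 *		new = exp + 1;
 *	} while (atomic_fcmpset_64(&counter, &exp, new) == 0);
 */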

#ifdef SMP
static void
atomic64_mtxinit(void *x __unused)
{
	int i;

	for (i = 0; i < A64_POOL_SIZE; i++)
		mtx_init(&a64_mtx_pool[i], "atomic64 mutex", NULL, MTX_DEF);
}

SYSINIT(atomic64_mtxinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, atomic64_mtxinit, NULL);
#endif	/* SMP */

#endif	/* _KERNEL */
149