/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Kyle Evans <kevans@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _SYS__ATOMIC_SUBWORD_H_
#define	_SYS__ATOMIC_SUBWORD_H_

/*
 * This header is specifically for platforms that either cannot perform
 * sub-word atomic operations or simply do not implement them.  Emulating
 * them is not ideal, since it takes a little extra effort to make sure our
 * atomic operations fail because of the bits of the word we're actually
 * trying to write, rather than because of changes to the rest of the word.
 */
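/*
 * For example, on a little-endian machine an atomic_cmpset_16() on the upper
 * halfword of a 32-bit word shifts 'old' and 'val' left by 16 and performs
 * the cmpset on the containing word with a mask of 0xffff0000, retrying if
 * only the unmasked bits changed underneath us.
 */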
#ifndef _MACHINE_ATOMIC_H_
#error do not include this header, use machine/atomic.h
#endif

#include <machine/endian.h>
#ifndef _KERNEL
#include <stdbool.h>
#endif

#ifndef NBBY
#define	NBBY	8
#endif

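/*
 * _ATOMIC_WORD_ALIGNED(p) rounds p down to the 32-bit word that contains it.
 */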
#define	_ATOMIC_WORD_ALIGNED(p)						\
    (uint32_t *)((__uintptr_t)(p) - ((__uintptr_t)(p) % 4))

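/*
 * _ATOMIC_BYTE_SHIFT(p) and _ATOMIC_HWORD_SHIFT(p) give the bit position of
 * the byte or halfword addressed by p within its containing 32-bit word,
 * which depends on the platform's byte order.
 */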
#if _BYTE_ORDER == _BIG_ENDIAN
#define	_ATOMIC_BYTE_SHIFT(p)						\
    ((3 - ((__uintptr_t)(p) % 4)) * NBBY)

#define	_ATOMIC_HWORD_SHIFT(p)						\
    ((2 - ((__uintptr_t)(p) % 4)) * NBBY)
#else
#define	_ATOMIC_BYTE_SHIFT(p)						\
    ((((__uintptr_t)(p) % 4)) * NBBY)

#define	_ATOMIC_HWORD_SHIFT(p)						\
    ((((__uintptr_t)(p) % 4)) * NBBY)
#endif

#ifndef _atomic_cmpset_masked_word
/*
 * Pass this a word-aligned address, the expected and replacement values, and
 * a mask of the bits you care about; it will loop until the cmpset either
 * succeeds or fails because of those bits rather than the ones we're not
 * masking.  old and val should already be preshifted to the proper position
 * within the word.
 */
static __inline int
_atomic_cmpset_masked_word(uint32_t *addr, uint32_t old, uint32_t val,
    uint32_t mask)
{
	int ret;
	uint32_t wcomp;

	wcomp = old;

	/*
	 * We'll attempt the cmpset on the entire word.  Loop here in case the
	 * operation fails due to the other half-word resident in that word,
	 * rather than the half-word we're trying to operate on.  Ideally we
	 * only take one trip through here.  We'll have to recalculate the old
	 * value since it's the other part of the word changing.
	 */
	do {
		old = (*addr & ~mask) | wcomp;
		ret = atomic_fcmpset_32(addr, &old, (old & ~mask) | val);
	} while (ret == 0 && (old & mask) == wcomp);

	return (ret);
}
#endif

#ifndef _atomic_fcmpset_masked_word
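/*
 * Single-attempt variant of the above: old and val must be preshifted into
 * position under mask.  On failure, *old is updated with the full word
 * observed at *addr.
 */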
static __inline int
_atomic_fcmpset_masked_word(uint32_t *addr, uint32_t *old, uint32_t val,
    uint32_t mask)
{

	/*
	 * fcmpset_* is documented in atomic(9) to allow spurious failures
	 * where *old == val on ll/sc architectures because the sc may fail
	 * due to parallel writes or other reasons.  We take advantage of that
	 * here and only attempt once, because the caller should be
	 * compensating for that possibility.
	 */
	*old = (*addr & ~mask) | *old;
	return (atomic_fcmpset_32(addr, old, (*old & ~mask) | val));
}
#endif

#ifndef atomic_cmpset_8
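/*
 * Emulate an 8-bit compare-and-set with a masked 32-bit cmpset on the word
 * that contains the byte.
 */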
static __inline int
atomic_cmpset_8(__volatile uint8_t *addr, uint8_t old, uint8_t val)
{
	int shift;

	shift = _ATOMIC_BYTE_SHIFT(addr);

	return (_atomic_cmpset_masked_word(_ATOMIC_WORD_ALIGNED(addr),
	    old << shift, val << shift, 0xff << shift));
}
#endif

#ifndef atomic_fcmpset_8
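/*
 * Emulate an 8-bit fcmpset; on failure, the byte observed at *addr is
 * written back to *old.
 */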
static __inline int
atomic_fcmpset_8(__volatile uint8_t *addr, uint8_t *old, uint8_t val)
{
	int ret, shift;
	uint32_t wold;

	shift = _ATOMIC_BYTE_SHIFT(addr);
	wold = *old << shift;
	ret = _atomic_fcmpset_masked_word(_ATOMIC_WORD_ALIGNED(addr),
	    &wold, val << shift, 0xff << shift);
	if (ret == 0)
		*old = (wold >> shift) & 0xff;
	return (ret);
}
#endif

#ifndef atomic_cmpset_16
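/*
 * Emulate a 16-bit compare-and-set with a masked 32-bit cmpset on the word
 * that contains the halfword.
 */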
static __inline int
atomic_cmpset_16(__volatile uint16_t *addr, uint16_t old, uint16_t val)
{
	int shift;

	shift = _ATOMIC_HWORD_SHIFT(addr);

	return (_atomic_cmpset_masked_word(_ATOMIC_WORD_ALIGNED(addr),
	    old << shift, val << shift, 0xffff << shift));
}
#endif

#ifndef atomic_fcmpset_16
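/*
 * Emulate a 16-bit fcmpset; on failure, the halfword observed at *addr is
 * written back to *old.
 */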
static __inline int
atomic_fcmpset_16(__volatile uint16_t *addr, uint16_t *old, uint16_t val)
{
	int ret, shift;
	uint32_t wold;

	shift = _ATOMIC_HWORD_SHIFT(addr);
	wold = *old << shift;
	ret = _atomic_fcmpset_masked_word(_ATOMIC_WORD_ALIGNED(addr),
	    &wold, val << shift, 0xffff << shift);
	if (ret == 0)
		*old = (wold >> shift) & 0xffff;
	return (ret);
}
#endif

#ifndef atomic_load_acq_8
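/*
 * 8-bit acquire load: load the containing word with acquire semantics and
 * extract the addressed byte.
 */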
static __inline uint8_t
atomic_load_acq_8(const volatile uint8_t *p)
{
	int shift;
	uint8_t ret;

	shift = _ATOMIC_BYTE_SHIFT(p);
	ret = (atomic_load_acq_32(_ATOMIC_WORD_ALIGNED(p)) >> shift) & 0xff;
	return (ret);
}
#endif

#ifndef atomic_load_acq_16
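/*
 * 16-bit acquire load: load the containing word with acquire semantics and
 * extract the addressed halfword.
 */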
static __inline uint16_t
atomic_load_acq_16(const volatile uint16_t *p)
{
	int shift;
	uint16_t ret;

	shift = _ATOMIC_HWORD_SHIFT(p);
	ret = (atomic_load_acq_32(_ATOMIC_WORD_ALIGNED(p)) >> shift) &
	    0xffff;
	return (ret);
}
#endif

#undef _ATOMIC_WORD_ALIGNED
#undef _ATOMIC_BYTE_SHIFT
#undef _ATOMIC_HWORD_SHIFT

#endif /* _SYS__ATOMIC_SUBWORD_H_ */