/*-
 * Copyright (c) 2013 Andrew Turner <andrew@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

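/*
 * isb: Instruction Synchronization Barrier. Flushes the pipeline so that
 * all instructions following the barrier are fetched after any prior
 * context-changing operations (e.g. system register writes) take effect.
 */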
#define	isb()		__asm __volatile("isb" : : : "memory")

/*
 * Options for DMB and DSB:
 *	oshld	Outer Shareable, load
 *	oshst	Outer Shareable, store
 *	osh	Outer Shareable, all
 *	nshld	Non-shareable, load
 *	nshst	Non-shareable, store
 *	nsh	Non-shareable, all
 *	ishld	Inner Shareable, load
 *	ishst	Inner Shareable, store
 *	ish	Inner Shareable, all
 *	ld	Full system, load
 *	st	Full system, store
 *	sy	Full system, all
 */
#define	dsb(opt)	__asm __volatile("dsb " __STRING(opt) : : : "memory")
#define	dmb(opt)	__asm __volatile("dmb " __STRING(opt) : : : "memory")

#define	mb()	dmb(sy)	/* Full system memory barrier all */
#define	wmb()	dmb(st)	/* Full system memory barrier store */
#define	rmb()	dmb(ld)	/* Full system memory barrier load */
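/*
 * Illustrative use of the store barrier (a sketch only; the descriptor
 * and ring fields below are hypothetical):
 *
 *	desc->paddr = buf_paddr;
 *	desc->len = buf_len;
 *	wmb();			// order the descriptor writes ...
 *	ring->prod_idx = idx;	// ... before publishing the new index
 */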

#if defined(KCSAN) && !defined(KCSAN_RUNTIME)
#include <sys/_cscan_atomic.h>
#else

#include <sys/atomic_common.h>

#define	ATOMIC_OP(op, asm_op, bar, a, l)				\
static __inline void							\
atomic_##op##_##bar##8(volatile uint8_t *p, uint8_t val)		\
{									\
	uint8_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ld"#a"xrb  %w0, [%2]      \n"				\
	    "   "#asm_op"  %w0, %w0, %w3  \n"				\
	    "   st"#l"xrb  %w1, %w0, [%2] \n"				\
	    "   cbnz       %w1, 1b        \n"				\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}									\
									\
static __inline void							\
atomic_##op##_##bar##16(volatile uint16_t *p, uint16_t val)		\
{									\
	uint16_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ld"#a"xrh  %w0, [%2]      \n"				\
	    "   "#asm_op"  %w0, %w0, %w3  \n"				\
	    "   st"#l"xrh  %w1, %w0, [%2] \n"				\
	    "   cbnz       %w1, 1b        \n"				\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}									\
									\
static __inline void							\
atomic_##op##_##bar##32(volatile uint32_t *p, uint32_t val)		\
{									\
	uint32_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ld"#a"xr   %w0, [%2]      \n"				\
	    "   "#asm_op"  %w0, %w0, %w3  \n"				\
	    "   st"#l"xr   %w1, %w0, [%2] \n"				\
	    "   cbnz       %w1, 1b        \n"				\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}									\
									\
static __inline void							\
atomic_##op##_##bar##64(volatile uint64_t *p, uint64_t val)		\
{									\
	uint64_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ld"#a"xr   %0, [%2]      \n"				\
	    "   "#asm_op"  %0, %0, %3    \n"				\
	    "   st"#l"xr   %w1, %0, [%2] \n"				\
	    "   cbnz       %w1, 1b       \n"				\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}

#define	ATOMIC(op, asm_op)						\
    ATOMIC_OP(op, asm_op,     ,  ,  )					\
    ATOMIC_OP(op, asm_op, acq_, a,  )					\
    ATOMIC_OP(op, asm_op, rel_,  , l)

ATOMIC(add,      add)
ATOMIC(clear,    bic)
ATOMIC(set,      orr)
ATOMIC(subtract, sub)
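/*
 * Each ATOMIC() line above expands ATOMIC_OP three times, yielding
 * relaxed, acquire and release variants for 8-, 16-, 32- and 64-bit
 * operands, e.g. atomic_add_32(), atomic_add_acq_32() and
 * atomic_add_rel_32(). Illustrative call (the flags field is
 * hypothetical):
 *
 *	atomic_set_32(&sc->flags, 0x1);		// atomically OR in a bit
 */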

#define	ATOMIC_FCMPSET(bar, a, l)					\
static __inline int							\
atomic_fcmpset_##bar##8(volatile uint8_t *p, uint8_t *cmpval,		\
    uint8_t newval)							\
{									\
	uint8_t tmp;							\
	uint8_t _cmpval = *cmpval;					\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov       %w1, #1        \n"				\
	    "   ld"#a"xrb %w0, [%2]      \n"				\
	    "   cmp       %w0, %w3       \n"				\
	    "   b.ne      2f             \n"				\
	    "   st"#l"xrb %w1, %w4, [%2] \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (_cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_fcmpset_##bar##16(volatile uint16_t *p, uint16_t *cmpval,	\
    uint16_t newval)							\
{									\
	uint16_t tmp;							\
	uint16_t _cmpval = *cmpval;					\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov       %w1, #1        \n"				\
	    "   ld"#a"xrh %w0, [%2]      \n"				\
	    "   cmp       %w0, %w3       \n"				\
	    "   b.ne      2f             \n"				\
	    "   st"#l"xrh %w1, %w4, [%2] \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (_cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_fcmpset_##bar##32(volatile uint32_t *p, uint32_t *cmpval,	\
    uint32_t newval)							\
{									\
	uint32_t tmp;							\
	uint32_t _cmpval = *cmpval;					\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov      %w1, #1        \n"				\
	    "   ld"#a"xr %w0, [%2]      \n"				\
	    "   cmp      %w0, %w3       \n"				\
	    "   b.ne     2f             \n"				\
	    "   st"#l"xr %w1, %w4, [%2] \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (_cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_fcmpset_##bar##64(volatile uint64_t *p, uint64_t *cmpval,	\
    uint64_t newval)							\
{									\
	uint64_t tmp;							\
	uint64_t _cmpval = *cmpval;					\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov      %w1, #1       \n"				\
	    "   ld"#a"xr %0, [%2]      \n"				\
	    "   cmp      %0, %3        \n"				\
	    "   b.ne     2f            \n"				\
	    "   st"#l"xr %w1, %4, [%2] \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (_cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (!res);							\
}

ATOMIC_FCMPSET(    ,  , )
ATOMIC_FCMPSET(acq_, a, )
ATOMIC_FCMPSET(rel_,  ,l)

#undef ATOMIC_FCMPSET
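/*
 * fcmpset writes the observed value back through cmpval on failure, so a
 * retry loop does not need to re-read the target. Note that these
 * variants may fail spuriously when the exclusive store is lost. A
 * minimal sketch (counter is hypothetical):
 *
 *	uint32_t old = counter;
 *	while (!atomic_fcmpset_32(&counter, &old, old + 1))
 *		;		// on failure, old holds the current value
 */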

#define	ATOMIC_CMPSET(bar, a, l)					\
static __inline int							\
atomic_cmpset_##bar##8(volatile uint8_t *p, uint8_t cmpval,		\
    uint8_t newval)							\
{									\
	uint8_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov       %w1, #1        \n"				\
	    "   ld"#a"xrb %w0, [%2]      \n"				\
	    "   cmp       %w0, %w3       \n"				\
	    "   b.ne      2f             \n"				\
	    "   st"#l"xrb %w1, %w4, [%2] \n"				\
	    "   cbnz      %w1, 1b        \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_cmpset_##bar##16(volatile uint16_t *p, uint16_t cmpval,		\
    uint16_t newval)							\
{									\
	uint16_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov       %w1, #1        \n"				\
	    "   ld"#a"xrh %w0, [%2]      \n"				\
	    "   cmp       %w0, %w3       \n"				\
	    "   b.ne      2f             \n"				\
	    "   st"#l"xrh %w1, %w4, [%2] \n"				\
	    "   cbnz      %w1, 1b        \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_cmpset_##bar##32(volatile uint32_t *p, uint32_t cmpval,		\
    uint32_t newval)							\
{									\
	uint32_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov      %w1, #1        \n"				\
	    "   ld"#a"xr %w0, [%2]      \n"				\
	    "   cmp      %w0, %w3       \n"				\
	    "   b.ne     2f             \n"				\
	    "   st"#l"xr %w1, %w4, [%2] \n"				\
	    "   cbnz     %w1, 1b        \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_cmpset_##bar##64(volatile uint64_t *p, uint64_t cmpval,		\
    uint64_t newval)							\
{									\
	uint64_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov      %w1, #1       \n"				\
	    "   ld"#a"xr %0, [%2]      \n"				\
	    "   cmp      %0, %3        \n"				\
	    "   b.ne     2f            \n"				\
	    "   st"#l"xr %w1, %4, [%2] \n"				\
	    "   cbnz     %w1, 1b       \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (!res);							\
}

ATOMIC_CMPSET(    ,  , )
ATOMIC_CMPSET(acq_, a, )
ATOMIC_CMPSET(rel_,  ,l)
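/*
 * Unlike fcmpset, cmpset retries internally via the cbnz loop when the
 * exclusive store fails, so it returns failure only when the compared
 * values actually differ. Sketch of a simple try-lock (the state field
 * is hypothetical):
 *
 *	if (atomic_cmpset_acq_32(&lk->state, 0, 1)) {
 *		// lock acquired with acquire semantics
 *	}
 */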

static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t val)
{
	uint32_t tmp, ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%w2, [%3]      \n"
	    "   add	%w0, %w2, %w4  \n"
	    "   stxr	%w1, %w0, [%3] \n"
	    "   cbnz	%w1, 1b        \n"
	    : "=&r"(tmp), "=&r"(res), "=&r"(ret)
	    : "r" (p), "r" (val)
	    : "memory"
	);

	return (ret);
}

static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp, ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%2, [%3]      \n"
	    "   add	%0, %2, %4    \n"
	    "   stxr	%w1, %0, [%3] \n"
	    "   cbnz	%w1, 1b       \n"
	    : "=&r"(tmp), "=&r"(res), "=&r"(ret)
	    : "r" (p), "r" (val)
	    : "memory"
	);

	return (ret);
}
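/*
 * fetchadd returns the value the target held before the addition, which
 * makes it handy for handing out unique slots (sketch only; the ring
 * fields are hypothetical):
 *
 *	idx = atomic_fetchadd_32(&ring->next_slot, 1) % RING_SIZE;
 */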

static __inline uint32_t
atomic_readandclear_32(volatile uint32_t *p)
{
	uint32_t ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%w1, [%2]      \n"
	    "   stxr	%w0, wzr, [%2] \n"
	    "   cbnz	%w0, 1b        \n"
	    : "=&r"(res), "=&r"(ret)
	    : "r" (p)
	    : "memory"
	);

	return (ret);
}

static __inline uint64_t
atomic_readandclear_64(volatile uint64_t *p)
{
	uint64_t ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%1, [%2]       \n"
	    "   stxr	%w0, xzr, [%2] \n"
	    "   cbnz	%w0, 1b        \n"
	    : "=&r"(res), "=&r"(ret)
	    : "r" (p)
	    : "memory"
	);

	return (ret);
}
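/*
 * readandclear atomically fetches the old value and zeroes the target,
 * e.g. for draining a pending-event mask (sketch only; intr_pending is
 * hypothetical):
 *
 *	pending = atomic_readandclear_32(&sc->intr_pending);
 */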

static __inline uint32_t
atomic_swap_32(volatile uint32_t *p, uint32_t val)
{
	uint32_t ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%w0, [%2]      \n"
	    "   stxr	%w1, %w3, [%2] \n"
	    "   cbnz	%w1, 1b        \n"
	    : "=&r"(ret), "=&r"(res)
	    : "r" (p), "r" (val)
	    : "memory"
	);

	return (ret);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%0, [%2]      \n"
	    "   stxr	%w1, %3, [%2] \n"
	    "   cbnz	%w1, 1b       \n"
	    : "=&r"(ret), "=&r"(res)
	    : "r" (p), "r" (val)
	    : "memory"
	);

	return (ret);
}
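/*
 * swap unconditionally stores the new value and returns the previous one
 * (sketch only; the state values are hypothetical):
 *
 *	prev = atomic_swap_32(&sc->state, STATE_RESET);
 */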

static __inline uint32_t
atomic_load_acq_32(volatile uint32_t *p)
{
	uint32_t ret;

	__asm __volatile(
	    "ldar	%w0, [%1] \n"
	    : "=&r" (ret)
	    : "r" (p)
	    : "memory");

	return (ret);
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{
	uint64_t ret;

	__asm __volatile(
	    "ldar	%0, [%1] \n"
	    : "=&r" (ret)
	    : "r" (p)
	    : "memory");

	return (ret);
}

static __inline void
atomic_store_rel_32(volatile uint32_t *p, uint32_t val)
{

	__asm __volatile(
	    "stlr	%w0, [%1] \n"
	    :
	    : "r" (val), "r" (p)
	    : "memory");
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t val)
{

	__asm __volatile(
	    "stlr	%0, [%1] \n"
	    :
	    : "r" (val), "r" (p)
	    : "memory");
}
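/*
 * Acquire loads pair with release stores to publish data across CPUs
 * (a sketch only; the shared fields are hypothetical):
 *
 *	// producer
 *	shared->data = compute();
 *	atomic_store_rel_32(&shared->ready, 1);
 *
 *	// consumer
 *	while (atomic_load_acq_32(&shared->ready) == 0)
 *		cpu_spinwait();
 *	consume(shared->data);
 */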

#define	atomic_add_int			atomic_add_32
#define	atomic_fcmpset_int		atomic_fcmpset_32
#define	atomic_clear_int		atomic_clear_32
#define	atomic_cmpset_int		atomic_cmpset_32
#define	atomic_fetchadd_int		atomic_fetchadd_32
#define	atomic_readandclear_int		atomic_readandclear_32
#define	atomic_set_int			atomic_set_32
#define	atomic_swap_int			atomic_swap_32
#define	atomic_subtract_int		atomic_subtract_32

#define	atomic_add_acq_int		atomic_add_acq_32
#define	atomic_fcmpset_acq_int		atomic_fcmpset_acq_32
#define	atomic_clear_acq_int		atomic_clear_acq_32
#define	atomic_cmpset_acq_int		atomic_cmpset_acq_32
#define	atomic_load_acq_int		atomic_load_acq_32
#define	atomic_set_acq_int		atomic_set_acq_32
#define	atomic_subtract_acq_int		atomic_subtract_acq_32

#define	atomic_add_rel_int		atomic_add_rel_32
#define	atomic_fcmpset_rel_int		atomic_fcmpset_rel_32
#define	atomic_clear_rel_int		atomic_clear_rel_32
#define	atomic_cmpset_rel_int		atomic_cmpset_rel_32
#define	atomic_set_rel_int		atomic_set_rel_32
#define	atomic_subtract_rel_int		atomic_subtract_rel_32
#define	atomic_store_rel_int		atomic_store_rel_32

#define	atomic_add_long			atomic_add_64
#define	atomic_fcmpset_long		atomic_fcmpset_64
#define	atomic_clear_long		atomic_clear_64
#define	atomic_cmpset_long		atomic_cmpset_64
#define	atomic_fetchadd_long		atomic_fetchadd_64
#define	atomic_readandclear_long	atomic_readandclear_64
#define	atomic_set_long			atomic_set_64
#define	atomic_swap_long		atomic_swap_64
#define	atomic_subtract_long		atomic_subtract_64

#define	atomic_add_ptr			atomic_add_64
#define	atomic_fcmpset_ptr		atomic_fcmpset_64
#define	atomic_clear_ptr		atomic_clear_64
#define	atomic_cmpset_ptr		atomic_cmpset_64
#define	atomic_fetchadd_ptr		atomic_fetchadd_64
#define	atomic_readandclear_ptr		atomic_readandclear_64
#define	atomic_set_ptr			atomic_set_64
#define	atomic_swap_ptr			atomic_swap_64
#define	atomic_subtract_ptr		atomic_subtract_64

#define	atomic_add_acq_long		atomic_add_acq_64
#define	atomic_fcmpset_acq_long		atomic_fcmpset_acq_64
#define	atomic_clear_acq_long		atomic_clear_acq_64
#define	atomic_cmpset_acq_long		atomic_cmpset_acq_64
#define	atomic_load_acq_long		atomic_load_acq_64
#define	atomic_set_acq_long		atomic_set_acq_64
#define	atomic_subtract_acq_long	atomic_subtract_acq_64

#define	atomic_add_acq_ptr		atomic_add_acq_64
#define	atomic_fcmpset_acq_ptr		atomic_fcmpset_acq_64
#define	atomic_clear_acq_ptr		atomic_clear_acq_64
#define	atomic_cmpset_acq_ptr		atomic_cmpset_acq_64
#define	atomic_load_acq_ptr		atomic_load_acq_64
#define	atomic_set_acq_ptr		atomic_set_acq_64
#define	atomic_subtract_acq_ptr		atomic_subtract_acq_64

#define	atomic_add_rel_long		atomic_add_rel_64
#define	atomic_fcmpset_rel_long		atomic_fcmpset_rel_64
#define	atomic_clear_rel_long		atomic_clear_rel_64
#define	atomic_cmpset_rel_long		atomic_cmpset_rel_64
#define	atomic_set_rel_long		atomic_set_rel_64
#define	atomic_subtract_rel_long	atomic_subtract_rel_64
#define	atomic_store_rel_long		atomic_store_rel_64

#define	atomic_add_rel_ptr		atomic_add_rel_64
#define	atomic_fcmpset_rel_ptr		atomic_fcmpset_rel_64
#define	atomic_clear_rel_ptr		atomic_clear_rel_64
#define	atomic_cmpset_rel_ptr		atomic_cmpset_rel_64
#define	atomic_set_rel_ptr		atomic_set_rel_64
#define	atomic_subtract_rel_ptr		atomic_subtract_rel_64
#define	atomic_store_rel_ptr		atomic_store_rel_64

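/*
 * An acquire fence only needs to order loads against later memory
 * accesses, hence "dmb ld". A release fence must order prior loads and
 * stores against later stores, which "dmb st" cannot express, so the
 * release, acq_rel and seq_cst fences all use the full "dmb sy".
 */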
static __inline void
atomic_thread_fence_acq(void)
{

	dmb(ld);
}

static __inline void
atomic_thread_fence_rel(void)
{

	dmb(sy);
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	dmb(sy);
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	dmb(sy);
}

#endif /* KCSAN && !KCSAN_RUNTIME */

#endif /* _MACHINE_ATOMIC_H_ */