/*-
 * Copyright (c) 2013 Andrew Turner <andrew@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#define	isb()		__asm __volatile("isb" : : : "memory")

/*
 * Options for DMB and DSB:
 *	oshld	Outer Shareable, load
 *	oshst	Outer Shareable, store
 *	osh	Outer Shareable, all
 *	nshld	Non-shareable, load
 *	nshst	Non-shareable, store
 *	nsh	Non-shareable, all
 *	ishld	Inner Shareable, load
 *	ishst	Inner Shareable, store
 *	ish	Inner Shareable, all
 *	ld	Full system, load
 *	st	Full system, store
 *	sy	Full system, all
 */
#define	dsb(opt)	__asm __volatile("dsb " __STRING(opt) : : : "memory")
#define	dmb(opt)	__asm __volatile("dmb " __STRING(opt) : : : "memory")

#define	mb()	dmb(sy)	/* Full system memory barrier, all */
#define	wmb()	dmb(st)	/* Full system memory barrier, store */
#define	rmb()	dmb(ld)	/* Full system memory barrier, load */
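
/*
 * Illustrative pairing of wmb()/rmb() (a sketch, not part of this header;
 * "data" and "ready" are hypothetical shared variables).  The producer's
 * wmb() keeps the data store before the flag store; the consumer's rmb()
 * keeps the flag load before the data load:
 *
 *	// producer			// consumer
 *	data = 42;			while (ready == 0)
 *	wmb();					;
 *	ready = 1;			rmb();
 *					use(data);
 */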

#if defined(KCSAN) && !defined(KCSAN_RUNTIME)
#include <sys/_cscan_atomic.h>
#else

#include <sys/atomic_common.h>

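/*
 * ATOMIC_OP() emits atomic read-modify-write functions for 8, 16, 32 and
 * 64-bit operands using a load-exclusive/store-exclusive (ldxr/stxr) retry
 * loop: the store-exclusive fails, and the loop retries, if the location
 * was modified between the paired exclusive accesses.  The "a" and "l"
 * parameters select the acquire (ldaxr) and release (stlxr) instruction
 * forms for the _acq_ and _rel_ variants.
 */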
#define	ATOMIC_OP(op, asm_op, bar, a, l)				\
static __inline void							\
atomic_##op##_##bar##8(volatile uint8_t *p, uint8_t val)		\
{									\
	uint8_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ld"#a"xrb  %w0, [%2]      \n"				\
	    "   "#asm_op"  %w0, %w0, %w3  \n"				\
	    "   st"#l"xrb  %w1, %w0, [%2] \n"				\
	    "   cbnz       %w1, 1b        \n"				\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}									\
									\
static __inline void							\
atomic_##op##_##bar##16(volatile uint16_t *p, uint16_t val)		\
{									\
	uint16_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ld"#a"xrh  %w0, [%2]      \n"				\
	    "   "#asm_op"  %w0, %w0, %w3  \n"				\
	    "   st"#l"xrh  %w1, %w0, [%2] \n"				\
	    "   cbnz       %w1, 1b        \n"				\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}									\
									\
static __inline void							\
atomic_##op##_##bar##32(volatile uint32_t *p, uint32_t val)		\
{									\
	uint32_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ld"#a"xr   %w0, [%2]      \n"				\
	    "   "#asm_op"  %w0, %w0, %w3  \n"				\
	    "   st"#l"xr   %w1, %w0, [%2] \n"				\
	    "   cbnz       %w1, 1b        \n"				\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}									\
									\
static __inline void							\
atomic_##op##_##bar##64(volatile uint64_t *p, uint64_t val)		\
{									\
	uint64_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ld"#a"xr   %0, [%2]      \n"				\
	    "   "#asm_op"  %0, %0, %3    \n"				\
	    "   st"#l"xr   %w1, %0, [%2] \n"				\
	    "   cbnz       %w1, 1b       \n"				\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}

#define	ATOMIC(op, asm_op)						\
    ATOMIC_OP(op, asm_op,     ,  ,  )					\
    ATOMIC_OP(op, asm_op, acq_, a,  )					\
    ATOMIC_OP(op, asm_op, rel_,  , l)

ATOMIC(add,      add)
ATOMIC(clear,    bic)
ATOMIC(set,      orr)
ATOMIC(subtract, sub)

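/*
 * For instance, ATOMIC(add, add) above expands to atomic_add_8() through
 * atomic_add_64() plus their _acq_ and _rel_ variants.  An illustrative
 * (hypothetical) use:
 *
 *	atomic_add_32(&counter, 1);		// relaxed atomic increment
 *	atomic_set_rel_64(&flags, DONE);	// set bits with release semantics
 */
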
#define	ATOMIC_FCMPSET(bar, a, l)					\
static __inline int							\
atomic_fcmpset_##bar##8(volatile uint8_t *p, uint8_t *cmpval,		\
    uint8_t newval)							\
{									\
	uint8_t tmp;							\
	uint8_t _cmpval = *cmpval;					\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov      %w1, #1        \n"				\
	    "   ld"#a"xrb %w0, [%2]     \n"				\
	    "   cmp      %w0, %w3       \n"				\
	    "   b.ne     2f             \n"				\
	    "   st"#l"xrb %w1, %w4, [%2]\n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (_cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_fcmpset_##bar##16(volatile uint16_t *p, uint16_t *cmpval,	\
    uint16_t newval)							\
{									\
	uint16_t tmp;							\
	uint16_t _cmpval = *cmpval;					\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov      %w1, #1        \n"				\
	    "   ld"#a"xrh %w0, [%2]      \n"				\
	    "   cmp      %w0, %w3       \n"				\
	    "   b.ne     2f             \n"				\
	    "   st"#l"xrh %w1, %w4, [%2] \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (_cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_fcmpset_##bar##32(volatile uint32_t *p, uint32_t *cmpval,	\
    uint32_t newval)							\
{									\
	uint32_t tmp;							\
	uint32_t _cmpval = *cmpval;					\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov      %w1, #1        \n"				\
	    "   ld"#a"xr %w0, [%2]      \n"				\
	    "   cmp      %w0, %w3       \n"				\
	    "   b.ne     2f             \n"				\
	    "   st"#l"xr %w1, %w4, [%2] \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (_cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_fcmpset_##bar##64(volatile uint64_t *p, uint64_t *cmpval,	\
    uint64_t newval)							\
{									\
	uint64_t tmp;							\
	uint64_t _cmpval = *cmpval;					\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov      %w1, #1       \n"				\
	    "   ld"#a"xr %0, [%2]      \n"				\
	    "   cmp      %0, %3        \n"				\
	    "   b.ne     2f            \n"				\
	    "   st"#l"xr %w1, %4, [%2] \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (_cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (!res);							\
}

ATOMIC_FCMPSET(    ,  , )
ATOMIC_FCMPSET(acq_, a, )
ATOMIC_FCMPSET(rel_,  ,l)

#undef ATOMIC_FCMPSET

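/*
 * A typical compare-and-swap retry loop over the fcmpset family (an
 * illustrative sketch, not part of this header; "p" is a hypothetical
 * counter).  On failure the observed value is written back through the
 * cmpval pointer, so the loop need not re-read *p.  Because the sequence
 * above has no cbnz retry, fcmpset may also fail spuriously when the
 * store-exclusive fails; callers must always be prepared to loop:
 *
 *	uint32_t old = *p;
 *	do {
 *	} while (!atomic_fcmpset_32(p, &old, old + 1));
 */
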
#define	ATOMIC_CMPSET(bar, a, l)					\
static __inline int							\
atomic_cmpset_##bar##8(volatile uint8_t *p, uint8_t cmpval,		\
    uint8_t newval)							\
{									\
	uint8_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov       %w1, #1        \n"				\
	    "   ld"#a"xrb %w0, [%2]      \n"				\
	    "   cmp       %w0, %w3       \n"				\
	    "   b.ne      2f             \n"				\
	    "   st"#l"xrb %w1, %w4, [%2] \n"				\
	    "   cbnz      %w1, 1b        \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_cmpset_##bar##16(volatile uint16_t *p, uint16_t cmpval,		\
    uint16_t newval)							\
{									\
	uint16_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov       %w1, #1        \n"				\
	    "   ld"#a"xrh %w0, [%2]      \n"				\
	    "   cmp       %w0, %w3       \n"				\
	    "   b.ne      2f             \n"				\
	    "   st"#l"xrh %w1, %w4, [%2] \n"				\
	    "   cbnz      %w1, 1b        \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_cmpset_##bar##32(volatile uint32_t *p, uint32_t cmpval,		\
    uint32_t newval)							\
{									\
	uint32_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov      %w1, #1        \n"				\
	    "   ld"#a"xr %w0, [%2]      \n"				\
	    "   cmp      %w0, %w3       \n"				\
	    "   b.ne     2f             \n"				\
	    "   st"#l"xr %w1, %w4, [%2] \n"				\
	    "   cbnz     %w1, 1b        \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_cmpset_##bar##64(volatile uint64_t *p, uint64_t cmpval,		\
    uint64_t newval)							\
{									\
	uint64_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov      %w1, #1       \n"				\
	    "   ld"#a"xr %0, [%2]      \n"				\
	    "   cmp      %0, %3        \n"				\
	    "   b.ne     2f            \n"				\
	    "   st"#l"xr %w1, %4, [%2] \n"				\
	    "   cbnz     %w1, 1b       \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (!res);							\
}

ATOMIC_CMPSET(    ,  , )
ATOMIC_CMPSET(acq_, a, )
ATOMIC_CMPSET(rel_,  ,l)

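/*
 * Unlike fcmpset, cmpset retries internally when the store-exclusive
 * fails (the cbnz above), so it returns 0 only when a genuinely
 * different value was observed.  An illustrative (hypothetical) spin
 * acquire:
 *
 *	while (!atomic_cmpset_acq_int(&lk->state, UNLOCKED, LOCKED))
 *		cpu_spinwait();
 */
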
static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t val)
{
	uint32_t tmp, ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%w2, [%3]      \n"
	    "   add	%w0, %w2, %w4  \n"
	    "   stxr	%w1, %w0, [%3] \n"
	    "   cbnz	%w1, 1b        \n"
	    : "=&r"(tmp), "=&r"(res), "=&r"(ret)
	    : "r" (p), "r" (val)
	    : "memory"
	);

	return (ret);
}

static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp, ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%2, [%3]      \n"
	    "   add	%0, %2, %4    \n"
	    "   stxr	%w1, %0, [%3] \n"
	    "   cbnz	%w1, 1b       \n"
	    : "=&r"(tmp), "=&r"(res), "=&r"(ret)
	    : "r" (p), "r" (val)
	    : "memory"
	);

	return (ret);
}

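/*
 * fetchadd returns the value the location held before the addition,
 * which suits uses such as ticket allocation (an illustrative sketch;
 * "tickets" is hypothetical):
 *
 *	uint64_t my_ticket = atomic_fetchadd_64(&tickets, 1);
 */
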
static __inline uint32_t
atomic_readandclear_32(volatile uint32_t *p)
{
	uint32_t ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%w1, [%2]      \n"
	    "   stxr	%w0, wzr, [%2] \n"
	    "   cbnz	%w0, 1b        \n"
	    : "=&r"(res), "=&r"(ret)
	    : "r" (p)
	    : "memory"
	);

	return (ret);
}

static __inline uint64_t
atomic_readandclear_64(volatile uint64_t *p)
{
	uint64_t ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%1, [%2]       \n"
	    "   stxr	%w0, xzr, [%2] \n"
	    "   cbnz	%w0, 1b        \n"
	    : "=&r"(res), "=&r"(ret)
	    : "r" (p)
	    : "memory"
	);

	return (ret);
}

static __inline uint32_t
atomic_swap_32(volatile uint32_t *p, uint32_t val)
{
	uint32_t ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%w0, [%2]      \n"
	    "   stxr	%w1, %w3, [%2] \n"
	    "   cbnz	%w1, 1b        \n"
	    : "=&r"(ret), "=&r"(res)
	    : "r" (p), "r" (val)
	    : "memory"
	);

	return (ret);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%0, [%2]      \n"
	    "   stxr	%w1, %3, [%2] \n"
	    "   cbnz	%w1, 1b       \n"
	    : "=&r"(ret), "=&r"(res)
	    : "r" (p), "r" (val)
	    : "memory"
	);

	return (ret);
}

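/*
 * Both swap and readandclear atomically return the previous contents;
 * readandclear is equivalent to swapping in zero.  Illustrative sketch
 * ("pending" and "owner" are hypothetical):
 *
 *	uint32_t events = atomic_readandclear_32(&pending);
 *	uint64_t prev = atomic_swap_64(&owner, me);
 */
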
static __inline uint8_t
atomic_load_acq_8(volatile uint8_t *p)
{
	uint8_t ret;

	__asm __volatile(
	    "ldarb	%w0, [%1] \n"
	    : "=&r" (ret)
	    : "r" (p)
	    : "memory");

	return (ret);
}

static __inline uint16_t
atomic_load_acq_16(volatile uint16_t *p)
{
	uint16_t ret;

	__asm __volatile(
	    "ldarh	%w0, [%1] \n"
	    : "=&r" (ret)
	    : "r" (p)
	    : "memory");

	return (ret);
}

static __inline uint32_t
atomic_load_acq_32(volatile uint32_t *p)
{
	uint32_t ret;

	__asm __volatile(
	    "ldar	%w0, [%1] \n"
	    : "=&r" (ret)
	    : "r" (p)
	    : "memory");

	return (ret);
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{
	uint64_t ret;

	__asm __volatile(
	    "ldar	%0, [%1] \n"
	    : "=&r" (ret)
	    : "r" (p)
	    : "memory");

	return (ret);
}

static __inline void
atomic_store_rel_8(volatile uint8_t *p, uint8_t val)
{

	__asm __volatile(
	    "stlrb	%w0, [%1] \n"
	    :
	    : "r" (val), "r" (p)
	    : "memory");
}

static __inline void
atomic_store_rel_16(volatile uint16_t *p, uint16_t val)
{

	__asm __volatile(
	    "stlrh	%w0, [%1] \n"
	    :
	    : "r" (val), "r" (p)
	    : "memory");
}

static __inline void
atomic_store_rel_32(volatile uint32_t *p, uint32_t val)
{

	__asm __volatile(
	    "stlr	%w0, [%1] \n"
	    :
	    : "r" (val), "r" (p)
	    : "memory");
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t val)
{

	__asm __volatile(
	    "stlr	%0, [%1] \n"
	    :
	    : "r" (val), "r" (p)
	    : "memory");
}

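/*
 * The load-acquire (ldar*) and store-release (stlr*) routines pair to
 * order a data handoff without a full barrier (an illustrative sketch;
 * "msg" and "ready" are hypothetical).  Any consumer whose acquire load
 * observes ready != 0 also observes the producer's earlier store to msg:
 *
 *	// producer
 *	msg = 42;
 *	atomic_store_rel_int(&ready, 1);
 *
 *	// consumer
 *	while (atomic_load_acq_int(&ready) == 0)
 *		cpu_spinwait();
 *	use(msg);
 */
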
static __inline int
atomic_testandclear_32(volatile uint32_t *p, u_int val)
{
	uint32_t mask, old, tmp;
	int res;

	mask = 1u << (val & 0x1f);
	__asm __volatile(
	    "1: ldxr	%w2, [%3]      \n"
	    "   bic	%w0, %w2, %w4  \n"
	    "   stxr	%w1, %w0, [%3] \n"
	    "   cbnz	%w1, 1b        \n"
	    : "=&r"(tmp), "=&r"(res), "=&r"(old)
	    : "r" (p), "r" (mask)
	    : "memory"
	);

	return ((old & mask) != 0);
}

static __inline int
atomic_testandclear_64(volatile uint64_t *p, u_int val)
{
	uint64_t mask, old, tmp;
	int res;

	mask = 1ul << (val & 0x3f);
	__asm __volatile(
	    "1: ldxr	%2, [%3]       \n"
	    "   bic	%0, %2, %4     \n"
	    "   stxr	%w1, %0, [%3]  \n"
	    "   cbnz	%w1, 1b        \n"
	    : "=&r"(tmp), "=&r"(res), "=&r"(old)
	    : "r" (p), "r" (mask)
	    : "memory"
	);

	return ((old & mask) != 0);
}

static __inline int
atomic_testandset_32(volatile uint32_t *p, u_int val)
{
	uint32_t mask, old, tmp;
	int res;

	mask = 1u << (val & 0x1f);
	__asm __volatile(
	    "1: ldxr	%w2, [%3]      \n"
	    "   orr	%w0, %w2, %w4  \n"
	    "   stxr	%w1, %w0, [%3] \n"
	    "   cbnz	%w1, 1b        \n"
	    : "=&r"(tmp), "=&r"(res), "=&r"(old)
	    : "r" (p), "r" (mask)
	    : "memory"
	);

	return ((old & mask) != 0);
}

static __inline int
atomic_testandset_64(volatile uint64_t *p, u_int val)
{
	uint64_t mask, old, tmp;
	int res;

	mask = 1ul << (val & 0x3f);
	__asm __volatile(
	    "1: ldxr	%2, [%3]       \n"
	    "   orr	%0, %2, %4     \n"
	    "   stxr	%w1, %0, [%3]  \n"
	    "   cbnz	%w1, 1b        \n"
	    : "=&r"(tmp), "=&r"(res), "=&r"(old)
	    : "r" (p), "r" (mask)
	    : "memory"
	);

	return ((old & mask) != 0);
}

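/*
 * testandset/testandclear atomically set or clear bit (val modulo the
 * operand width) and return that bit's previous state, e.g. as a minimal
 * bit lock (an illustrative sketch; "lockword" is hypothetical, and a
 * real lock would also need acquire/release ordering):
 *
 *	while (atomic_testandset_32(&lockword, 0))
 *		cpu_spinwait();
 *	...critical section...
 *	atomic_testandclear_32(&lockword, 0);
 */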

#define	atomic_add_int			atomic_add_32
#define	atomic_fcmpset_int		atomic_fcmpset_32
#define	atomic_clear_int		atomic_clear_32
#define	atomic_cmpset_int		atomic_cmpset_32
#define	atomic_fetchadd_int		atomic_fetchadd_32
#define	atomic_readandclear_int		atomic_readandclear_32
#define	atomic_set_int			atomic_set_32
#define	atomic_swap_int			atomic_swap_32
#define	atomic_subtract_int		atomic_subtract_32
#define	atomic_testandclear_int		atomic_testandclear_32
#define	atomic_testandset_int		atomic_testandset_32

#define	atomic_add_acq_int		atomic_add_acq_32
#define	atomic_fcmpset_acq_int		atomic_fcmpset_acq_32
#define	atomic_clear_acq_int		atomic_clear_acq_32
#define	atomic_cmpset_acq_int		atomic_cmpset_acq_32
#define	atomic_load_acq_int		atomic_load_acq_32
#define	atomic_set_acq_int		atomic_set_acq_32
#define	atomic_subtract_acq_int		atomic_subtract_acq_32

#define	atomic_add_rel_int		atomic_add_rel_32
#define	atomic_fcmpset_rel_int		atomic_fcmpset_rel_32
#define	atomic_clear_rel_int		atomic_clear_rel_32
#define	atomic_cmpset_rel_int		atomic_cmpset_rel_32
#define	atomic_set_rel_int		atomic_set_rel_32
#define	atomic_subtract_rel_int		atomic_subtract_rel_32
#define	atomic_store_rel_int		atomic_store_rel_32

#define	atomic_add_long			atomic_add_64
#define	atomic_fcmpset_long		atomic_fcmpset_64
#define	atomic_clear_long		atomic_clear_64
#define	atomic_cmpset_long		atomic_cmpset_64
#define	atomic_fetchadd_long		atomic_fetchadd_64
#define	atomic_readandclear_long	atomic_readandclear_64
#define	atomic_set_long			atomic_set_64
#define	atomic_swap_long		atomic_swap_64
#define	atomic_subtract_long		atomic_subtract_64
#define	atomic_testandclear_long	atomic_testandclear_64
#define	atomic_testandset_long		atomic_testandset_64

#define	atomic_add_ptr			atomic_add_64
#define	atomic_fcmpset_ptr		atomic_fcmpset_64
#define	atomic_clear_ptr		atomic_clear_64
#define	atomic_cmpset_ptr		atomic_cmpset_64
#define	atomic_fetchadd_ptr		atomic_fetchadd_64
#define	atomic_readandclear_ptr		atomic_readandclear_64
#define	atomic_set_ptr			atomic_set_64
#define	atomic_swap_ptr			atomic_swap_64
#define	atomic_subtract_ptr		atomic_subtract_64

#define	atomic_add_acq_long		atomic_add_acq_64
#define	atomic_fcmpset_acq_long		atomic_fcmpset_acq_64
#define	atomic_clear_acq_long		atomic_clear_acq_64
#define	atomic_cmpset_acq_long		atomic_cmpset_acq_64
#define	atomic_load_acq_long		atomic_load_acq_64
#define	atomic_set_acq_long		atomic_set_acq_64
#define	atomic_subtract_acq_long	atomic_subtract_acq_64

#define	atomic_add_acq_ptr		atomic_add_acq_64
#define	atomic_fcmpset_acq_ptr		atomic_fcmpset_acq_64
#define	atomic_clear_acq_ptr		atomic_clear_acq_64
#define	atomic_cmpset_acq_ptr		atomic_cmpset_acq_64
#define	atomic_load_acq_ptr		atomic_load_acq_64
#define	atomic_set_acq_ptr		atomic_set_acq_64
#define	atomic_subtract_acq_ptr		atomic_subtract_acq_64

#define	atomic_add_rel_long		atomic_add_rel_64
#define	atomic_fcmpset_rel_long		atomic_fcmpset_rel_64
#define	atomic_clear_rel_long		atomic_clear_rel_64
#define	atomic_cmpset_rel_long		atomic_cmpset_rel_64
#define	atomic_set_rel_long		atomic_set_rel_64
#define	atomic_subtract_rel_long	atomic_subtract_rel_64
#define	atomic_store_rel_long		atomic_store_rel_64

#define	atomic_add_rel_ptr		atomic_add_rel_64
#define	atomic_fcmpset_rel_ptr		atomic_fcmpset_rel_64
#define	atomic_clear_rel_ptr		atomic_clear_rel_64
#define	atomic_cmpset_rel_ptr		atomic_cmpset_rel_64
#define	atomic_set_rel_ptr		atomic_set_rel_64
#define	atomic_subtract_rel_ptr		atomic_subtract_rel_64
#define	atomic_store_rel_ptr		atomic_store_rel_64

static __inline void
atomic_thread_fence_acq(void)
{

	dmb(ld);
}

static __inline void
atomic_thread_fence_rel(void)
{

	dmb(sy);
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	dmb(sy);
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	dmb(sy);
}

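/*
 * Note that atomic_thread_fence_rel() uses dmb(sy) rather than dmb(st):
 * a release fence must order earlier loads as well as earlier stores
 * against later stores, while the store-only barrier orders only
 * store-store.  An illustrative use with plain stores (a sketch; "data"
 * and "ready" are hypothetical):
 *
 *	data = 42;
 *	atomic_thread_fence_rel();
 *	ready = 1;
 */
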
#endif /* KCSAN && !KCSAN_RUNTIME */

#endif /* _MACHINE_ATOMIC_H_ */