/*-
 * Copyright (c) 2013 Andrew Turner <andrew@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#include <sys/atomic_common.h>

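/* Instruction Synchronization Barrier: flushes the CPU pipeline */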
#define	isb()		__asm __volatile("isb" : : : "memory")

/*
 * Options for DMB and DSB:
 *	oshld	Outer Shareable, load
 *	oshst	Outer Shareable, store
 *	osh	Outer Shareable, all
 *	nshld	Non-shareable, load
 *	nshst	Non-shareable, store
 *	nsh	Non-shareable, all
 *	ishld	Inner Shareable, load
 *	ishst	Inner Shareable, store
 *	ish	Inner Shareable, all
 *	ld	Full system, load
 *	st	Full system, store
 *	sy	Full system, all
 */
#define	dsb(opt)	__asm __volatile("dsb " __STRING(opt) : : : "memory")
#define	dmb(opt)	__asm __volatile("dmb " __STRING(opt) : : : "memory")

#define	mb()	dmb(sy)	/* Full system memory barrier all */
#define	wmb()	dmb(st)	/* Full system memory barrier store */
#define	rmb()	dmb(ld)	/* Full system memory barrier load */

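/*
 * Illustrative pairing of the barriers above (a sketch; "data" and "flag"
 * are hypothetical):
 *
 *	data = 42;
 *	wmb();		// producer: publish data before raising the flag
 *	flag = 1;
 *
 * The consumer would test flag, issue rmb(), then read data.
 */

/*
 * ATOMIC_OP generates the atomic_<op>[_acq|_rel]_{8,16,32,64}() primitives
 * as load-exclusive/store-exclusive retry loops.  "a" selects the acquire
 * form of the load (ldaxr*), "l" the release form of the store (stlxr*),
 * and "bar" is the matching name fragment.
 */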
#define	ATOMIC_OP(op, asm_op, bar, a, l)				\
static __inline void							\
atomic_##op##_##bar##8(volatile uint8_t *p, uint8_t val)		\
{									\
	uint8_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ld"#a"xrb  %w0, [%2]      \n"				\
	    "   "#asm_op"  %w0, %w0, %w3  \n"				\
	    "   st"#l"xrb  %w1, %w0, [%2] \n"				\
	    "   cbnz       %w1, 1b        \n"				\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}									\
									\
static __inline void							\
atomic_##op##_##bar##16(volatile uint16_t *p, uint16_t val)		\
{									\
	uint16_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ld"#a"xrh  %w0, [%2]      \n"				\
	    "   "#asm_op"  %w0, %w0, %w3  \n"				\
	    "   st"#l"xrh  %w1, %w0, [%2] \n"				\
	    "   cbnz       %w1, 1b        \n"				\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}									\
									\
static __inline void							\
atomic_##op##_##bar##32(volatile uint32_t *p, uint32_t val)		\
{									\
	uint32_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ld"#a"xr   %w0, [%2]      \n"				\
	    "   "#asm_op"  %w0, %w0, %w3  \n"				\
	    "   st"#l"xr   %w1, %w0, [%2] \n"				\
	    "   cbnz       %w1, 1b        \n"				\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}									\
									\
static __inline void							\
atomic_##op##_##bar##64(volatile uint64_t *p, uint64_t val)		\
{									\
	uint64_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ld"#a"xr   %0, [%2]      \n"				\
	    "   "#asm_op"  %0, %0, %3    \n"				\
	    "   st"#l"xr   %w1, %0, [%2] \n"				\
	    "   cbnz       %w1, 1b       \n"				\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}

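/* Instantiate relaxed, acquire and release variants of each operation. */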
#define	ATOMIC(op, asm_op)						\
    ATOMIC_OP(op, asm_op,     ,  ,  )					\
    ATOMIC_OP(op, asm_op, acq_, a,  )					\
    ATOMIC_OP(op, asm_op, rel_,  , l)

ATOMIC(add,      add)
ATOMIC(clear,    bic)
ATOMIC(set,      orr)
ATOMIC(subtract, sub)

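/*
 * Example use of the generated primitives (a sketch; "flags" and
 * FLAG_BUSY are hypothetical):
 *
 *	static volatile uint32_t flags;
 *
 *	atomic_set_32(&flags, FLAG_BUSY);	// flags |= FLAG_BUSY, atomically
 *	atomic_clear_rel_32(&flags, FLAG_BUSY);	// release-clear when done
 */

/*
 * atomic_fcmpset_*() stores newval at *p if *p equals *cmpval, returning
 * non-zero on success.  On failure it writes the value observed at *p back
 * to *cmpval.  A failed store-exclusive also reports failure, so the
 * operation may fail spuriously and callers are expected to loop.
 */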
#define	ATOMIC_FCMPSET(bar, a, l)					\
static __inline int							\
atomic_fcmpset_##bar##8(volatile uint8_t *p, uint8_t *cmpval,		\
    uint8_t newval)							\
{									\
	uint8_t tmp;							\
	uint8_t _cmpval = *cmpval;					\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov       %w1, #1        \n"				\
	    "   ld"#a"xrb %w0, [%2]      \n"				\
	    "   cmp       %w0, %w3       \n"				\
	    "   b.ne      2f             \n"				\
	    "   st"#l"xrb %w1, %w4, [%2] \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (_cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_fcmpset_##bar##16(volatile uint16_t *p, uint16_t *cmpval,	\
    uint16_t newval)							\
{									\
	uint16_t tmp;							\
	uint16_t _cmpval = *cmpval;					\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov       %w1, #1        \n"				\
	    "   ld"#a"xrh %w0, [%2]      \n"				\
	    "   cmp       %w0, %w3       \n"				\
	    "   b.ne      2f             \n"				\
	    "   st"#l"xrh %w1, %w4, [%2] \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (_cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_fcmpset_##bar##32(volatile uint32_t *p, uint32_t *cmpval,	\
    uint32_t newval)							\
{									\
	uint32_t tmp;							\
	uint32_t _cmpval = *cmpval;					\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov      %w1, #1        \n"				\
	    "   ld"#a"xr %w0, [%2]      \n"				\
	    "   cmp      %w0, %w3       \n"				\
	    "   b.ne     2f             \n"				\
	    "   st"#l"xr %w1, %w4, [%2] \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (_cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_fcmpset_##bar##64(volatile uint64_t *p, uint64_t *cmpval,	\
    uint64_t newval)							\
{									\
	uint64_t tmp;							\
	uint64_t _cmpval = *cmpval;					\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov      %w1, #1       \n"				\
	    "   ld"#a"xr %0, [%2]      \n"				\
	    "   cmp      %0, %3        \n"				\
	    "   b.ne     2f            \n"				\
	    "   st"#l"xr %w1, %4, [%2] \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (_cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (!res);							\
}

ATOMIC_FCMPSET(    ,  , )
ATOMIC_FCMPSET(acq_, a, )
ATOMIC_FCMPSET(rel_,  , l)

#undef ATOMIC_FCMPSET

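/*
 * Typical fcmpset retry loop (a sketch; "counter" is hypothetical):
 *
 *	uint32_t old = counter;
 *	while (!atomic_fcmpset_32(&counter, &old, old + 1))
 *		;	// "old" was refreshed from memory, just retry
 */

/*
 * atomic_cmpset_*() is the same compare-and-swap, but takes the expected
 * value directly and retries internally when the store-exclusive fails,
 * so it returns 0 only when the compared values genuinely differ.
 */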
#define	ATOMIC_CMPSET(bar, a, l)					\
static __inline int							\
atomic_cmpset_##bar##8(volatile uint8_t *p, uint8_t cmpval,		\
    uint8_t newval)							\
{									\
	uint8_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov       %w1, #1        \n"				\
	    "   ld"#a"xrb %w0, [%2]      \n"				\
	    "   cmp       %w0, %w3       \n"				\
	    "   b.ne      2f             \n"				\
	    "   st"#l"xrb %w1, %w4, [%2] \n"				\
	    "   cbnz      %w1, 1b        \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_cmpset_##bar##16(volatile uint16_t *p, uint16_t cmpval,		\
    uint16_t newval)							\
{									\
	uint16_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov       %w1, #1        \n"				\
	    "   ld"#a"xrh %w0, [%2]      \n"				\
	    "   cmp       %w0, %w3       \n"				\
	    "   b.ne      2f             \n"				\
	    "   st"#l"xrh %w1, %w4, [%2] \n"				\
	    "   cbnz      %w1, 1b        \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_cmpset_##bar##32(volatile uint32_t *p, uint32_t cmpval,		\
    uint32_t newval)							\
{									\
	uint32_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov      %w1, #1        \n"				\
	    "   ld"#a"xr %w0, [%2]      \n"				\
	    "   cmp      %w0, %w3       \n"				\
	    "   b.ne     2f             \n"				\
	    "   st"#l"xr %w1, %w4, [%2] \n"				\
	    "   cbnz     %w1, 1b        \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (!res);							\
}									\
									\
static __inline int							\
atomic_cmpset_##bar##64(volatile uint64_t *p, uint64_t cmpval,		\
    uint64_t newval)							\
{									\
	uint64_t tmp;							\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov      %w1, #1       \n"				\
	    "   ld"#a"xr %0, [%2]      \n"				\
	    "   cmp      %0, %3        \n"				\
	    "   b.ne     2f            \n"				\
	    "   st"#l"xr %w1, %4, [%2] \n"				\
	    "   cbnz     %w1, 1b       \n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (!res);							\
}

ATOMIC_CMPSET(    ,  , )
ATOMIC_CMPSET(acq_, a, )
ATOMIC_CMPSET(rel_,  , l)

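/* Atomically add val to *p and return the value *p held beforehand. */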
static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t val)
{
	uint32_t tmp, ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%w2, [%3]      \n"
	    "   add	%w0, %w2, %w4  \n"
	    "   stxr	%w1, %w0, [%3] \n"
	    "   cbnz	%w1, 1b        \n"
	    : "=&r"(tmp), "=&r"(res), "=&r"(ret)
	    : "r" (p), "r" (val)
	    : "memory"
	);

	return (ret);
}

static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp, ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%2, [%3]      \n"
	    "   add	%0, %2, %4    \n"
	    "   stxr	%w1, %0, [%3] \n"
	    "   cbnz	%w1, 1b       \n"
	    : "=&r"(tmp), "=&r"(res), "=&r"(ret)
	    : "r" (p), "r" (val)
	    : "memory"
	);

	return (ret);
}

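/*
 * Example fetchadd use, handing out ticket numbers (a sketch;
 * "next_ticket" is hypothetical):
 *
 *	static volatile uint32_t next_ticket;
 *
 *	uint32_t mine = atomic_fetchadd_32(&next_ticket, 1);
 *
 * Concurrent callers each receive a distinct, increasing value.
 */

/* Atomically replace *p with zero, returning the previous contents. */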
static __inline uint32_t
atomic_readandclear_32(volatile uint32_t *p)
{
	uint32_t ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%w1, [%2]      \n"
	    "   stxr	%w0, wzr, [%2] \n"
	    "   cbnz	%w0, 1b        \n"
	    : "=&r"(res), "=&r"(ret)
	    : "r" (p)
	    : "memory"
	);

	return (ret);
}

static __inline uint64_t
atomic_readandclear_64(volatile uint64_t *p)
{
	uint64_t ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%1, [%2]       \n"
	    "   stxr	%w0, xzr, [%2] \n"
	    "   cbnz	%w0, 1b        \n"
	    : "=&r"(res), "=&r"(ret)
	    : "r" (p)
	    : "memory"
	);

	return (ret);
}

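/* Atomically replace *p with val, returning the previous contents. */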
static __inline uint32_t
atomic_swap_32(volatile uint32_t *p, uint32_t val)
{
	uint32_t ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%w0, [%2]      \n"
	    "   stxr	%w1, %w3, [%2] \n"
	    "   cbnz	%w1, 1b        \n"
	    : "=&r"(ret), "=&r"(res)
	    : "r" (p), "r" (val)
	    : "memory"
	);

	return (ret);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t ret;
	int res;

	__asm __volatile(
	    "1: ldxr	%0, [%2]      \n"
	    "   stxr	%w1, %3, [%2] \n"
	    "   cbnz	%w1, 1b       \n"
	    : "=&r"(ret), "=&r"(res)
	    : "r" (p), "r" (val)
	    : "memory"
	);

	return (ret);
}

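/*
 * Acquire loads and release stores, implemented directly with the AArch64
 * ldar and stlr instructions rather than with explicit barriers.
 */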
static __inline uint32_t
atomic_load_acq_32(volatile uint32_t *p)
{
	uint32_t ret;

	__asm __volatile(
	    "ldar	%w0, [%1] \n"
	    : "=&r" (ret)
	    : "r" (p)
	    : "memory");

	return (ret);
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{
	uint64_t ret;

	__asm __volatile(
	    "ldar	%0, [%1] \n"
	    : "=&r" (ret)
	    : "r" (p)
	    : "memory");

	return (ret);
}

static __inline void
atomic_store_rel_32(volatile uint32_t *p, uint32_t val)
{

	__asm __volatile(
	    "stlr	%w0, [%1] \n"
	    :
	    : "r" (val), "r" (p)
	    : "memory");
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t val)
{

	__asm __volatile(
	    "stlr	%0, [%1] \n"
	    :
	    : "r" (val), "r" (p)
	    : "memory");
}

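/* Map the int/long/ptr names onto the fixed-width implementations. */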
#define	atomic_add_int			atomic_add_32
#define	atomic_fcmpset_int		atomic_fcmpset_32
#define	atomic_clear_int		atomic_clear_32
#define	atomic_cmpset_int		atomic_cmpset_32
#define	atomic_fetchadd_int		atomic_fetchadd_32
#define	atomic_readandclear_int		atomic_readandclear_32
#define	atomic_set_int			atomic_set_32
#define	atomic_swap_int			atomic_swap_32
#define	atomic_subtract_int		atomic_subtract_32

#define	atomic_add_acq_int		atomic_add_acq_32
#define	atomic_fcmpset_acq_int		atomic_fcmpset_acq_32
#define	atomic_clear_acq_int		atomic_clear_acq_32
#define	atomic_cmpset_acq_int		atomic_cmpset_acq_32
#define	atomic_load_acq_int		atomic_load_acq_32
#define	atomic_set_acq_int		atomic_set_acq_32
#define	atomic_subtract_acq_int		atomic_subtract_acq_32

#define	atomic_add_rel_int		atomic_add_rel_32
#define	atomic_fcmpset_rel_int		atomic_fcmpset_rel_32
#define	atomic_clear_rel_int		atomic_clear_rel_32
#define	atomic_cmpset_rel_int		atomic_cmpset_rel_32
#define	atomic_set_rel_int		atomic_set_rel_32
#define	atomic_subtract_rel_int		atomic_subtract_rel_32
#define	atomic_store_rel_int		atomic_store_rel_32

#define	atomic_add_long			atomic_add_64
#define	atomic_fcmpset_long		atomic_fcmpset_64
#define	atomic_clear_long		atomic_clear_64
#define	atomic_cmpset_long		atomic_cmpset_64
#define	atomic_fetchadd_long		atomic_fetchadd_64
#define	atomic_readandclear_long	atomic_readandclear_64
#define	atomic_set_long			atomic_set_64
#define	atomic_swap_long		atomic_swap_64
#define	atomic_subtract_long		atomic_subtract_64

#define	atomic_add_ptr			atomic_add_64
#define	atomic_fcmpset_ptr		atomic_fcmpset_64
#define	atomic_clear_ptr		atomic_clear_64
#define	atomic_cmpset_ptr		atomic_cmpset_64
#define	atomic_fetchadd_ptr		atomic_fetchadd_64
#define	atomic_readandclear_ptr		atomic_readandclear_64
#define	atomic_set_ptr			atomic_set_64
#define	atomic_swap_ptr			atomic_swap_64
#define	atomic_subtract_ptr		atomic_subtract_64

#define	atomic_add_acq_long		atomic_add_acq_64
#define	atomic_fcmpset_acq_long		atomic_fcmpset_acq_64
#define	atomic_clear_acq_long		atomic_clear_acq_64
#define	atomic_cmpset_acq_long		atomic_cmpset_acq_64
#define	atomic_load_acq_long		atomic_load_acq_64
#define	atomic_set_acq_long		atomic_set_acq_64
#define	atomic_subtract_acq_long	atomic_subtract_acq_64

#define	atomic_add_acq_ptr		atomic_add_acq_64
#define	atomic_fcmpset_acq_ptr		atomic_fcmpset_acq_64
#define	atomic_clear_acq_ptr		atomic_clear_acq_64
#define	atomic_cmpset_acq_ptr		atomic_cmpset_acq_64
#define	atomic_load_acq_ptr		atomic_load_acq_64
#define	atomic_set_acq_ptr		atomic_set_acq_64
#define	atomic_subtract_acq_ptr		atomic_subtract_acq_64

#define	atomic_add_rel_long		atomic_add_rel_64
#define	atomic_fcmpset_rel_long		atomic_fcmpset_rel_64
#define	atomic_clear_rel_long		atomic_clear_rel_64
#define	atomic_cmpset_rel_long		atomic_cmpset_rel_64
#define	atomic_set_rel_long		atomic_set_rel_64
#define	atomic_subtract_rel_long	atomic_subtract_rel_64
#define	atomic_store_rel_long		atomic_store_rel_64

#define	atomic_add_rel_ptr		atomic_add_rel_64
#define	atomic_fcmpset_rel_ptr		atomic_fcmpset_rel_64
#define	atomic_clear_rel_ptr		atomic_clear_rel_64
#define	atomic_cmpset_rel_ptr		atomic_cmpset_rel_64
#define	atomic_set_rel_ptr		atomic_set_rel_64
#define	atomic_subtract_rel_ptr		atomic_subtract_rel_64
#define	atomic_store_rel_ptr		atomic_store_rel_64

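/*
 * Thread fences.  Only the acquire fence can use the weaker "dmb ld"; a
 * release fence must order earlier loads and stores against later stores,
 * which "dmb st" alone does not provide, so it and the stronger fences
 * use the full "dmb sy".
 */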
static __inline void
atomic_thread_fence_acq(void)
{

	dmb(ld);
}

static __inline void
atomic_thread_fence_rel(void)
{

	dmb(sy);
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	dmb(sy);
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	dmb(sy);
}

#endif /* _MACHINE_ATOMIC_H_ */