/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")

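/*
 * Illustrative sketch (not part of this header): mb() provides the full
 * Store/Load barrier that plain x86 stores and loads do not.  In a
 * Dekker-style handshake each CPU must publish its own flag before
 * reading the other's, so a full barrier is required between the store
 * and the load.  The flag variables and function name are hypothetical.
 */
#if 0
volatile int flag0, flag1;

void
cpu0_enter(void)
{
	flag0 = 1;
	mb();			/* order the store before the load */
	if (flag1 == 0) {
		/* CPU 0 may proceed; CPU 1 will see flag0 == 1. */
	}
}
#endif
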
/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
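
/*
 * Illustrative sketch (not part of this header): typical use of the
 * simple operations above to manipulate a flags word shared between an
 * interrupt handler and top-half code.  The names below are
 * hypothetical.
 */
#if 0
static volatile u_int softc_flags;
#define	SC_BUSY		0x01

static void
example_mark_busy(void)
{
	atomic_set_int(&softc_flags, SC_BUSY);	 /* *p |= V, atomically */
}

static void
example_mark_idle(void)
{
	atomic_clear_int(&softc_flags, SC_BUSY); /* *p &= ~V, atomically */
}
#endif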

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);

#define	ATOMIC_LOAD(TYPE, LOP)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly statements are marked volatile to prevent the compiler
 * from discarding them as dead code.  GCC aggressively reorders
 * operations, so the barrier variants also need a memory clobber to
 * keep memory accesses from being moved across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "memory", "cc");				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == expect) *dst = src (32-bit ints, 64-bit longs)
 *
 * Returns 0 on failure, non-zero on success.
 */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"       sete	%0 ;		"
	"1:				"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (expect),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory", "cc");

	return (res);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgq %2,%1 ;	"
	"       sete	%0 ;		"
	"1:				"
	"# atomic_cmpset_long"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (expect),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory", "cc");

	return (res);
}
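
/*
 * Illustrative sketch (not part of this header): the usual retry loop
 * built on atomic_cmpset_int, here setting a flag bit without losing
 * concurrent updates to the other bits.  The function name is
 * hypothetical.
 */
#if 0
static __inline void
example_set_flag(volatile u_int *p, u_int flag)
{
	u_int old;

	do {
		old = *p;
	} while (atomic_cmpset_int(p, old, old | flag) == 0);
}
#endif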

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p)			/* 2 */
	: "cc");
	return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and
 * return the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddq	%0, %1 ;	"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p)			/* 2 */
	: "cc");
	return (v);
}
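
/*
 * Illustrative sketch (not part of this header): atomic_fetchadd_int
 * hands out unique, monotonically increasing values, e.g. for a ticket
 * lock or a statistics counter.  The names are hypothetical.
 */
#if 0
static volatile u_int next_ticket;

static __inline u_int
example_take_ticket(void)
{
	/* Returns the pre-increment value, so each caller is unique. */
	return (atomic_fetchadd_int(&next_ticket, 1));
}
#endif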

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * x86 memory model, a simple store guarantees release semantics.
 *
 * However, loads may pass stores, so for atomic_load_acq we have to
 * ensure a Store/Load barrier to do the load in SMP kernels.  We use
 * "lock cmpxchg" as recommended by the AMD Software Optimization
 * Guide, and not mfence.  For UP kernels, however, the cache of the
 * single processor is always consistent, so we only need to take care
 * of the compiler.
 */
#define	ATOMIC_STORE(TYPE)				\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__compiler_membar();				\
	*p = v;						\
}							\
struct __hack

#if defined(_KERNEL) && !defined(SMP)

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE tmp;					\
							\
	tmp = *p;					\
	__compiler_membar();				\
	return (tmp);					\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "=m" (*p)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory", "cc");				\
							\
	return (res);					\
}							\
struct __hack

#endif /* _KERNEL && !SMP */
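
/*
 * Illustrative sketch (not part of this header): a release store paired
 * with an acquire load, publishing data from a producer to a consumer.
 * The release store orders the data write before the flag write; the
 * acquire load orders the flag read before the data read.  The names
 * below are hypothetical.
 */
#if 0
static u_int shared_data;
static volatile u_int data_ready;

static void
example_publish(u_int value)
{
	shared_data = value;
	atomic_store_rel_int(&data_ready, 1);
}

static int
example_consume(u_int *value)
{
	if (atomic_load_acq_int(&data_ready) == 0)
		return (0);
	*value = shared_data;
	return (1);
}
#endif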

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "ir",  v);
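
/*
 * Illustrative sketch (not part of this header): in the inline-asm case,
 * ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) expands to roughly the
 * following on an SMP kernel (the _barr_ variant adds a "memory"
 * clobber):
 */
#if 0
static __inline void
atomic_add_int(volatile u_int *p, u_int v)
{
	__asm __volatile("lock ; addl %1,%0"
	: "=m" (*p)
	: "ir" (v), "m" (*p)
	: "cc");
}
#endif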

ATOMIC_LOAD(char,  "cmpxchgb %b0,%1");
ATOMIC_LOAD(short, "cmpxchgw %w0,%1");
ATOMIC_LOAD(int,   "cmpxchgl %0,%1");
ATOMIC_LOAD(long,  "cmpxchgq %0,%1");

ATOMIC_STORE(char);
ATOMIC_STORE(short);
ATOMIC_STORE(int);
ATOMIC_STORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE

#ifndef WANT_FUNCTIONS

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}
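
/*
 * Illustrative sketch (not part of this header): atomic_readandclear_int
 * snapshots and resets a counter in one step, so no increments are lost
 * between the read and the clear.  The names are hypothetical.
 */
#if 0
static volatile u_int intr_count;

static __inline u_int
example_harvest_count(void)
{
	return (atomic_readandclear_int(&intr_count));
}
#endif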

#else /* !__GNUCLIKE_ASM */

u_int	atomic_readandclear_int(volatile u_int *addr);
u_long	atomic_readandclear_long(volatile u_long *addr);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_readandclear_64	atomic_readandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_readandclear_ptr	atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */