#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
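
/*
 * Illustrative sketch (not from this file): the comment above notes
 * that even a UP kernel needs these barriers when talking to devices.
 * A driver that fills a DMA descriptor and then rings a doorbell
 * register could use wmb() to keep the writes device-visible in
 * order; desc, buf_dma, buf_len, RING and dev->doorbell are made-up
 * names for the example:
 *
 *	desc->addr = buf_dma;		// 1) fill the descriptor
 *	desc->len  = buf_len;
 *	wmb();				// 2) descriptor before doorbell
 *	writel(RING, dev->doorbell);	// 3) tell the device to look
 */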

/*
 * Barriers for coherent DMA: x86 keeps cacheable loads and stores in
 * order, so a compiler barrier suffices, except that
 * CONFIG_X86_PPRO_FENCE (Pentium Pro errata workaround) needs a real
 * read fence.
 */
#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()
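
/*
 * Illustrative sketch (not from this file): the canonical dma_rmb()
 * use is reading a descriptor's payload only after seeing the device
 * give up ownership; desc, DEVICE_OWN and data are made-up names:
 *
 *	if (desc->status != DEVICE_OWN) {
 *		dma_rmb();		// status checked before data read
 *		data = desc->data;	// now safe to read the payload
 *	}
 */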

#define __smp_mb()	mb()
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
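
/*
 * Illustrative sketch (not from this file): __smp_store_mb() backs
 * smp_store_mb(), a store followed by a full barrier.  xchg carries
 * an implicit LOCK prefix on x86, so one instruction provides both.
 * A typical sleep/wakeup-style use, with made-up names:
 *
 *	smp_store_mb(waiter->flag, 1);	// store, then full fence
 *	if (!wake_condition)		// re-check only after the fence
 *		schedule();
 */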

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * With this option set we cannot rely on x86's usual strong TSO
 * memory model and must fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif
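
/*
 * Illustrative sketch (not from this file): on TSO the pair above
 * costs only a compiler barrier each.  A producer/consumer handoff,
 * with made-up names (data, ready, use):
 *
 *	// producer
 *	data = 42;			// plain store
 *	smp_store_release(&ready, 1);	// publish: data before ready
 *
 *	// consumer
 *	if (smp_load_acquire(&ready))	// ready observed before data
 *		use(data);		// guaranteed to see data == 42
 */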

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
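
/*
 * Illustrative sketch (not from this file): LOCK-prefixed RMW ops are
 * already full barriers on x86, so the classic pattern below needs no
 * extra fence instruction here; obj, dead and ref_count are made-up
 * names:
 *
 *	obj->dead = 1;			// plain store
 *	smp_mb__before_atomic();	// just barrier() on x86
 *	atomic_dec(&obj->ref_count);	// LOCK'd RMW: a fence in itself
 */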

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */