/* xref: /linux/arch/s390/include/asm/barrier.h (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5) */
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define mb() do {  asm volatile("bcr 14,0" : : : "memory"); } while (0)
#else
/* bcr 15,0: full serialization (includes checkpoint synchronization) */
#define mb() do {  asm volatile("bcr 15,0" : : : "memory"); } while (0)
#endif

/*
 * s390 is strongly ordered; all read/write barrier flavors map to the
 * single full barrier above, and read_barrier_depends() is a no-op.
 */
#define rmb()				mb()
#define wmb()				mb()
#define read_barrier_depends()		do { } while (0)
#define smp_mb()			mb()
#define smp_rmb()			rmb()
#define smp_wmb()			wmb()
#define smp_read_barrier_depends()	read_barrier_depends()

#define smp_mb__before_atomic()		smp_mb()
#define smp_mb__after_atomic()		smp_mb()

/* Assign and then issue a full barrier so the store is globally visible. */
#define set_mb(var, value)		do { var = value; mb(); } while (0)

/*
 * Acquire/release accessors: on this strongly ordered architecture only a
 * compiler barrier (barrier()) is needed to keep the compiler from
 * reordering around the access; no hardware barrier instruction is issued.
 * compiletime_assert_atomic_type() rejects types that cannot be accessed
 * atomically in a single instruction.
 */
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif /* __ASM_BARRIER_H */