xref: /linux/arch/mips/include/asm/barrier.h (revision c0c914eca7f251c70facc37dfebeaf176601918d)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#include <asm/addrspace.h>

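/*
 * __sync() emits the MIPS SYNC instruction, a full memory barrier that
 * orders all earlier loads and stores against all later ones.  SYNC only
 * exists from the MIPS II ISA onwards, so ".set mips2" forces the
 * assembler to accept it even when building for an older ISA level.
 * On CPUs without SYNC (CONFIG_CPU_HAS_SYNC unset) the macro expands to
 * nothing.
 */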
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while (0)
#endif

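/*
 * __fast_iob() performs an uncached load from KSEG1 and waits for it with
 * a NOP, on the assumption that an uncached read cannot overtake writes
 * still sitting in the CPU or bus write buffers; the read therefore drains
 * them, which is what iob() ("I/O barrier") needs before a device access.
 */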
#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")
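
/*
 * fast_wmb()/fast_rmb()/fast_mb()/fast_iob() are the building blocks for
 * the mandatory barriers further down.  Cavium Octeon only reorders
 * stores, so fast_wmb() can use back-to-back SYNCW instructions (an
 * Octeon-specific store-ordering barrier that is much cheaper than a
 * full SYNC), fast_rmb() degenerates to a compiler barrier, and
 * fast_iob() is a no-op on this I/O-coherent SoC.  Everything else falls
 * back to a full SYNC; SGI IP28 additionally needs a SYNC sandwiched
 * between two uncached reads of a fixed I/O location to get a reliable
 * fast_iob().
 */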
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define OCTEON_SYNCW_STR	".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
# define __syncw()	__asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")

# define fast_wmb()	__syncw()
# define fast_rmb()	barrier()
# define fast_mb()	__sync()
# define fast_iob()	do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# define fast_wmb()	__sync()
# define fast_rmb()	__sync()
# define fast_mb()	__sync()
# ifdef CONFIG_SGI_IP28
#  define fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"sync\n\t"			\
		"lw	$0,%0\n\t"		\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \
		: "memory")
# else
#  define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)
# endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */

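/*
 * Mandatory barriers.  CPUs with an external write buffer
 * (CONFIG_CPU_HAS_WB, typically older R3000-class systems) must call
 * wbflush() to drain it; everyone else maps mb() and iob() onto the
 * fast_* variants selected above, as wmb() and rmb() always are.
 */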
#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()

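/*
 * SMP barriers.  On weakly ordered CPUs (CONFIG_WEAK_ORDERING) the
 * __smp_*() variants must emit a real SYNC (or the cheaper SYNCW /
 * compiler-barrier combinations on Octeon); on strongly ordered CPUs a
 * compiler barrier is enough.  asm-generic/barrier.h turns these into
 * smp_mb()/smp_rmb()/smp_wmb() on SMP builds.
 *
 * A minimal usage sketch of the resulting smp_wmb()/smp_rmb() pairing
 * (variable names are illustrative only, not part of this header):
 *
 *	writer:
 *		WRITE_ONCE(shared_data, 42);
 *		smp_wmb();		-- order data before the flag
 *		WRITE_ONCE(data_ready, 1);
 *	reader:
 *		if (READ_ONCE(data_ready)) {
 *			smp_rmb();	-- order the flag before data
 *			consume(shared_data);
 *		}
 */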
#if defined(CONFIG_WEAK_ORDERING)
# ifdef CONFIG_CPU_CAVIUM_OCTEON
#  define __smp_mb()	__sync()
#  define __smp_rmb()	barrier()
#  define __smp_wmb()	__syncw()
# else
#  define __smp_mb()	__asm__ __volatile__("sync" : : :"memory")
#  define __smp_rmb()	__asm__ __volatile__("sync" : : :"memory")
#  define __smp_wmb()	__asm__ __volatile__("sync" : : :"memory")
# endif
#else
#define __smp_mb()	barrier()
#define __smp_rmb()	barrier()
#define __smp_wmb()	barrier()
#endif

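/*
 * __WEAK_LLSC_MB is the barrier string appended to LL/SC sequences on SMP
 * systems whose ordering is weak even around LL/SC
 * (CONFIG_WEAK_REORDERING_BEYOND_LLSC); elsewhere it is empty.
 * smp_llsc_mb() emits the same barrier as a standalone statement.
 */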
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
#define __WEAK_LLSC_MB		"	sync	\n"
#else
#define __WEAK_LLSC_MB		"		\n"
#endif

#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")

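/*
 * smp_mb__before_llsc() orders memory accesses issued before an LL/SC
 * sequence against the sequence itself.  On Octeon a store barrier
 * (SYNCW) suffices because the core only reorders stores; other weakly
 * ordered CPUs need the full smp_llsc_mb().  nudge_writes() is a hint
 * that pushes buffered stores out as early as possible: a single SYNCW
 * on Octeon, a plain mb() elsewhere.
 */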
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
#define __smp_mb__before_llsc() __smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t"		\
					    ".set arch=octeon\n\t"	\
					    "syncw\n\t"			\
					    ".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
#define __smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif

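/*
 * Atomic operations on MIPS are implemented with LL/SC, so the generic
 * before/after-atomic hooks reuse the LL/SC barriers above.
 */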
#define __smp_mb__before_atomic()	__smp_mb__before_llsc()
#define __smp_mb__after_atomic()	smp_llsc_mb()

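/*
 * asm-generic/barrier.h supplies default definitions for everything not
 * overridden above (e.g. it maps smp_mb() and friends onto the __smp_*()
 * variants on SMP builds).
 */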
#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */