/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 07 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) MIPS Technologies, Inc.
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#include <linux/stringify.h>
#include <asm/compiler.h>

#define ___ssnop							\
	sll	$0, $0, 1

#define ___ehb								\
	sll	$0, $0, 3
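
/*
 * Both are encoded as shifts of $0 and so read as plain NOPs on CPUs
 * that predate them: SSNOP (sll $0, $0, 1) is the superscalar no-op
 * that is guaranteed to issue alone, EHB (sll $0, $0, 3) is the R2
 * execution hazard barrier.
 */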

/*
 * TLB hazards
 */
#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && \
	!defined(CONFIG_CPU_CAVIUM_OCTEON)

/*
 * MIPSR2 defines ehb for hazard avoidance
 */

#define __mtc0_tlbw_hazard						\
	___ehb

#define __tlbw_use_hazard						\
	___ehb

#define __tlb_probe_hazard						\
	___ehb

#define __irq_enable_hazard						\
	___ehb

#define __irq_disable_hazard						\
	___ehb

#define __back_to_back_c0_hazard					\
	___ehb

/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as argument to inline assembler.  Gas on the other
 * hand has the annoying difference between la and dla, which are only
 * usable for 32-bit resp. 64-bit code, so they can't be used without
 * conditional compilation.  The alternative is switching the assembler
 * to 64-bit code, which happens to work right even for 32-bit code ...
 */
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set "MIPS_ISA_LEVEL"				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)
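
/*
 * Illustrative use (a sketch, not a call site in this file; loc, insns
 * and size are made-up names): after writing out new instructions and
 * flushing the caches, an instruction hazard barrier makes the new
 * code visible to the pipeline:
 *
 *	memcpy(loc, insns, size);
 *	local_flush_icache_range((unsigned long)loc,
 *				 (unsigned long)loc + size);
 *	instruction_hazard();
 */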

#elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \
	defined(CONFIG_CPU_BMIPS)

/*
 * These are slightly complicated by the fact that we guarantee R1 kernels to
 * run fine on R2 processors.
 */

#define __mtc0_tlbw_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlbw_use_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlb_probe_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __irq_enable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __irq_disable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __back_to_back_c0_hazard					\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

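/*
 * The ssnops above pad out the worst-case R1 hazard; the trailing ehb
 * reads as a plain no-op on R1 but resolves the hazard immediately when
 * the same kernel binary runs on an R2 core.
 */
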
/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as argument to inline assembler.  Gas on the other
 * hand has the annoying difference between la and dla, which are only
 * usable for 32-bit resp. 64-bit code, so they can't be used without
 * conditional compilation.  The alternative is switching the assembler
 * to 64-bit code, which happens to work right even for 32-bit code ...
 */
#define __instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)

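/*
 * jr.hb does not exist before R2, so only execute the barrier once the
 * CPU has been determined at run time to actually implement it.
 */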
#define instruction_hazard()						\
do {									\
	if (cpu_has_mips_r2_r6)						\
		__instruction_hazard();					\
} while (0)

#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
	defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_CPU_R10000) || \
	defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer.
 */

#define __mtc0_tlbw_hazard

#define __tlbw_use_hazard

#define __tlb_probe_hazard

#define __irq_enable_hazard

#define __irq_disable_hazard

#define __back_to_back_c0_hazard

#define instruction_hazard() do { } while (0)

#elif defined(CONFIG_CPU_SB1)

/*
 * Mostly like R4000 for historic reasons
 */
#define __mtc0_tlbw_hazard

#define __tlbw_use_hazard

#define __tlb_probe_hazard

#define __irq_enable_hazard

#define __irq_disable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define __back_to_back_c0_hazard

#define instruction_hazard() do { } while (0)

#else

/*
 * Finally the catchall case for all other processors including R4000, R4400,
 * R4600, R4700, R5000, RM7000, NEC VR41xx etc.
 *
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single cycle
 * hazard so this is a nice trick to get optimal code for a range of
 * processors.
 */
#define __mtc0_tlbw_hazard						\
	nop;								\
	nop

#define __tlbw_use_hazard						\
	nop;								\
	nop;								\
	nop

#define __tlb_probe_hazard						\
	nop;								\
	nop;								\
	nop

#define __irq_enable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define __irq_disable_hazard						\
	nop;								\
	nop;								\
	nop

#define __back_to_back_c0_hazard					\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define instruction_hazard() do { } while (0)

#endif


/* FPU hazards */

#if defined(CONFIG_CPU_SB1)

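/*
 * The bnezl on $0 below never branches, but being a branch-likely it
 * appears to force SB1 to drain its pipeline much as a mispredicted
 * branch would, serializing the preceding CP0 write; its delay-slot
 * ssnop is nullified.  (An interpretation of the idiom, not taken from
 * SB1 documentation.)
 */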
#define __enable_fpu_hazard						\
	.set	push;							\
	.set	mips64;							\
	.set	noreorder;						\
	___ssnop;							\
	bnezl	$0, .+4;						\
	___ssnop;							\
	.set	pop

#define __disable_fpu_hazard

#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)

#define __enable_fpu_hazard						\
	___ehb

#define __disable_fpu_hazard						\
	___ehb

#else

#define __enable_fpu_hazard						\
	nop;								\
	nop;								\
	nop;								\
	nop

#define __disable_fpu_hazard						\
	___ehb

#endif

#ifdef __ASSEMBLY__

#define _ssnop ___ssnop
#define	_ehb ___ehb
#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
#define tlbw_use_hazard __tlbw_use_hazard
#define tlb_probe_hazard __tlb_probe_hazard
#define irq_enable_hazard __irq_enable_hazard
#define irq_disable_hazard __irq_disable_hazard
#define back_to_back_c0_hazard __back_to_back_c0_hazard
#define enable_fpu_hazard __enable_fpu_hazard
#define disable_fpu_hazard __disable_fpu_hazard

#else

#define _ssnop()							\
do {									\
	__asm__ __volatile__(						\
	__stringify(___ssnop)						\
	);								\
} while (0)

#define	_ehb()								\
do {									\
	__asm__ __volatile__(						\
	__stringify(___ehb)						\
	);								\
} while (0)


#define mtc0_tlbw_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__mtc0_tlbw_hazard)					\
	);								\
} while (0)


#define tlbw_use_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlbw_use_hazard)					\
	);								\
} while (0)


#define tlb_probe_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlb_probe_hazard)					\
	);								\
} while (0)


#define irq_enable_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__irq_enable_hazard)				\
	);								\
} while (0)


#define irq_disable_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__irq_disable_hazard)				\
	);								\
} while (0)


#define back_to_back_c0_hazard()					\
do {									\
	__asm__ __volatile__(						\
	__stringify(__back_to_back_c0_hazard)				\
	);								\
} while (0)

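/*
 * Illustrative pairing (a sketch; delta is a made-up variable): a CP0
 * write immediately followed by a dependent CP0 access needs the
 * barrier so the later access observes the effect of the write:
 *
 *	write_c0_compare(read_c0_count() + delta);
 *	back_to_back_c0_hazard();
 */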

#define enable_fpu_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__enable_fpu_hazard)				\
	);								\
} while (0)


#define disable_fpu_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__disable_fpu_hazard)				\
	);								\
} while (0)

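/*
 * Illustrative TLB write sequence (a sketch of how the hazard macros
 * bracket CP0 accesses; the real call sites live in the TLB management
 * code):
 *
 *	write_c0_entryhi(entryhi);
 *	mtc0_tlbw_hazard();
 *	tlb_write_indexed();
 *	tlbw_use_hazard();
 */
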
/*
 * MIPS R2 instruction hazard barrier.  Needs to be called as a subroutine.
 */
extern void mips_ihb(void);
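/*
 * From assembler, a call would look like (sketch):
 *
 *	jal	mips_ihb
 *	 nop
 */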

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */