xref: /linux/arch/mips/kernel/r4k-bugs64.c (revision 06d07429858317ded2db7986113a9e0129cd599b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2003, 2004, 2007  Maciej W. Rozycki
4  */
5 #include <linux/context_tracking.h>
6 #include <linux/init.h>
7 #include <linux/kernel.h>
8 #include <linux/ptrace.h>
9 #include <linux/stddef.h>
10 
11 #include <asm/bugs.h>
12 #include <asm/compiler.h>
13 #include <asm/cpu.h>
14 #include <asm/fpu.h>
15 #include <asm/mipsregs.h>
16 #include <asm/setup.h>
17 #include <asm/traps.h>
18 
/*
 * Messages used by the erratum checks below.  bug64hit is the panic
 * format string; the %s is filled with one of the three advisory
 * strings depending on which workaround config option would help.
 * All live in .init.data as the checks run exactly once at boot.
 */
static char bug64hit[] __initdata =
	"reliable operation impossible!\n%s";
static char nowar[] __initdata =
	"Please report to <linux-mips@vger.kernel.org>.";
static char r4kwar[] __initdata =
	"Enable CPU_R4000_WORKAROUNDS to rectify.";
static char daddiwar[] __initdata =
	"Enable CPU_DADDI_WORKAROUNDS to rectify.";
27 
/*
 * Pad with NOPs so that the code emitted immediately after this
 * helper starts "mod" instruction slots past an "align"-byte
 * boundary.  Both arguments feed assembler directives and therefore
 * must be compile-time constants ("n" constraints).  __always_inline
 * is required: the padding is only meaningful at the caller's own
 * code location.
 */
static __always_inline __init
void align_mod(const int align, const int mod)
{
	asm volatile(
		".set	push\n\t"
		".set	noreorder\n\t"
		".balign %0\n\t"
		".rept	%1\n\t"
		"nop\n\t"
		".endr\n\t"
		".set	pop"
		:
		: "n"(align), "n"(mod));
}
42 
/*
 * Run the R4000 mult/dsll32 erratum test sequence with the critical
 * instructions placed "mod" slots past an "align"-byte boundary.
 *
 * Outputs:
 *   *v1 - dsll32 result issued right after a mult; may be corrupted
 *         on an affected CPU,
 *   *v2 - the same shift computed in C, with gcc's multiply (and its
 *         workaround, if enabled) in between,
 *   *w  - dsll32 result issued after an intervening mflo, i.e. the
 *         expected correct value.
 *
 * Runs with interrupts off so nothing perturbs the instruction
 * sequencing under test.
 */
static __always_inline __init
void mult_sh_align_mod(long *v1, long *v2, long *w,
		       const int align, const int mod)
{
	unsigned long flags;
	int m1, m2;
	long p, s, lv1, lv2, lw;

	/*
	 * We want the multiply and the shift to be isolated from the
	 * rest of the code to disable gcc optimizations.  Hence the
	 * asm statements that execute nothing, but make gcc not know
	 * what the values of m1, m2 and s are and what lv2 and p are
	 * used for.
	 */

	local_irq_save(flags);
	/*
	 * The following code leads to a wrong result of the first
	 * dsll32 when executed on R4000 rev. 2.2 or 3.0 (PRId
	 * 00000422 or 00000430, respectively).
	 *
	 * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
	 * 3.0" by MIPS Technologies, Inc., errata #16 and #28 for
	 * details.  I got no permission to duplicate them here,
	 * sigh... --macro
	 */
	asm volatile(
		""
		: "=r" (m1), "=r" (m2), "=r" (s)
		: "0" (5), "1" (8), "2" (5));
	align_mod(align, mod);
	/*
	 * The trailing nop is needed to fulfill the two-instruction
	 * requirement between reading hi/lo and starting a mult/div.
	 * Leaving it out may cause gas to insert a nop itself breaking
	 * the desired alignment of the next chunk.
	 */
	asm volatile(
		".set	push\n\t"
		".set	noat\n\t"
		".set	noreorder\n\t"
		".set	nomacro\n\t"
		"mult	%2, %3\n\t"
		"dsll32 %0, %4, %5\n\t"
		"mflo	$0\n\t"
		"dsll32 %1, %4, %5\n\t"
		"nop\n\t"
		".set	pop"
		: "=&r" (lv1), "=r" (lw)
		: "r" (m1), "r" (m2), "r" (s), "I" (0)
		: "hi", "lo", "$0");
	/* We have to use single integers for m1 and m2 and a double
	 * one for p to be sure the mulsidi3 gcc's RTL multiplication
	 * instruction has the workaround applied.  Older versions of
	 * gcc have correct umulsi3 and mulsi3, but other
	 * multiplication variants lack the workaround.
	 */
	asm volatile(
		""
		: "=r" (m1), "=r" (m2), "=r" (s)
		: "0" (m1), "1" (m2), "2" (s));
	align_mod(align, mod);
	/* Compiler-generated multiply; carries the daddi/mult fix if built in. */
	p = m1 * m2;
	lv2 = s << 32;
	asm volatile(
		""
		: "=r" (lv2)
		: "0" (lv2), "r" (p));
	local_irq_restore(flags);

	*v1 = lv1;
	*v2 = lv2;
	*w = lw;
}
118 
check_mult_sh(void)119 static __always_inline __init void check_mult_sh(void)
120 {
121 	long v1[8], v2[8], w[8];
122 	int bug, fix, i;
123 
124 	printk("Checking for the multiply/shift bug... ");
125 
126 	/*
127 	 * Testing discovered false negatives for certain code offsets
128 	 * into cache lines.  Hence we test all possible offsets for
129 	 * the worst assumption of an R4000 I-cache line width of 32
130 	 * bytes.
131 	 *
132 	 * We can't use a loop as alignment directives need to be
133 	 * immediates.
134 	 */
135 	mult_sh_align_mod(&v1[0], &v2[0], &w[0], 32, 0);
136 	mult_sh_align_mod(&v1[1], &v2[1], &w[1], 32, 1);
137 	mult_sh_align_mod(&v1[2], &v2[2], &w[2], 32, 2);
138 	mult_sh_align_mod(&v1[3], &v2[3], &w[3], 32, 3);
139 	mult_sh_align_mod(&v1[4], &v2[4], &w[4], 32, 4);
140 	mult_sh_align_mod(&v1[5], &v2[5], &w[5], 32, 5);
141 	mult_sh_align_mod(&v1[6], &v2[6], &w[6], 32, 6);
142 	mult_sh_align_mod(&v1[7], &v2[7], &w[7], 32, 7);
143 
144 	bug = 0;
145 	for (i = 0; i < 8; i++)
146 		if (v1[i] != w[i])
147 			bug = 1;
148 
149 	if (bug == 0) {
150 		pr_cont("no.\n");
151 		return;
152 	}
153 
154 	pr_cont("yes, workaround... ");
155 
156 	fix = 1;
157 	for (i = 0; i < 8; i++)
158 		if (v2[i] != w[i])
159 			fix = 0;
160 
161 	if (fix == 1) {
162 		pr_cont("yes.\n");
163 		return;
164 	}
165 
166 	pr_cont("no.\n");
167 	panic(bug64hit,
168 	      IS_ENABLED(CONFIG_CPU_R4000_WORKAROUNDS) ? nowar : r4kwar);
169 }
170 
/* Set by do_daddi_ov() when the overflow exception fires during the
 * daddi tests below; volatile because it is written from exception
 * context and polled from normal context. */
static volatile int daddi_ov;

/*
 * Temporary integer-overflow exception handler installed by
 * check_daddi().  Records that the exception fired and skips the
 * faulting 4-byte instruction so the test sequence continues.
 */
asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	daddi_ov = 1;
	regs->cp0_epc += 4;	/* step past the trapping daddi */
	exception_exit(prev_state);
}
182 
/*
 * Detect the R4000 erratum where daddi fails to raise the integer
 * overflow exception.  A temporary overflow handler is installed; on
 * a sane CPU the deliberately overflowing daddi traps and sets
 * daddi_ov.  If it does not, the test is repeated without the
 * .set nomacro/noat restrictions — presumably letting the assembler
 * substitute its daddi workaround sequence (see HAVE_AS_SET_DADDI) —
 * to verify a built-in workaround is effective.  Panics if the bug
 * is present and nothing rectifies it.
 */
static __init void check_daddi(void)
{
	extern asmlinkage void handle_daddi_ov(void);
	unsigned long flags;
	void *handler;
	long v, tmp;

	printk("Checking for the daddi bug... ");

	local_irq_save(flags);
	handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
	/*
	 * The following code fails to trigger an overflow exception
	 * when executed on R4000 rev. 2.2 or 3.0 (PRId 00000422 or
	 * 00000430, respectively).
	 *
	 * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
	 * 3.0" by MIPS Technologies, Inc., erratum #23 for details.
	 * I got no permission to duplicate it here, sigh... --macro
	 */
	asm volatile(
		".set	push\n\t"
		".set	noat\n\t"
		".set	noreorder\n\t"
		".set	nomacro\n\t"
		"addiu	%1, $0, %2\n\t"
		"dsrl	%1, %1, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
		".set	daddi\n\t"
#endif
		"daddi	%0, %1, %3\n\t"
		".set	pop"
		: "=r" (v), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
	set_except_vector(EXCCODE_OV, handler);
	local_irq_restore(flags);

	if (daddi_ov) {
		/* Exception fired: daddi behaves correctly. */
		pr_cont("no.\n");
		return;
	}

	pr_cont("yes, workaround... ");

	/* Same overflow, now via the unrestricted (macro) daddi. */
	local_irq_save(flags);
	handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
	asm volatile(
		"addiu	%1, $0, %2\n\t"
		"dsrl	%1, %1, 1\n\t"
		"daddi	%0, %1, %3"
		: "=r" (v), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
	set_except_vector(EXCCODE_OV, handler);
	local_irq_restore(flags);

	if (daddi_ov) {
		pr_cont("yes.\n");
		return;
	}

	pr_cont("no.\n");
	panic(bug64hit,
	      IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) ? nowar : daddiwar);
}
247 
/* Result of the daddiu probe: -1 = not yet tested, 0 = CPU is clean,
 * 1 = erratum present.  Read elsewhere in the MIPS code. */
int daddiu_bug	= -1;

/*
 * Detect the R4400 daddiu erratum by comparing a raw daddiu result
 * (v) against the same sum built from addiu + daddu (w).  If they
 * differ, the bug is present; the test is then repeated without the
 * .set nomacro/noat restrictions — presumably allowing an assembler
 * workaround expansion — to confirm an effective fix is built in.
 * Panics if the bug hits and no workaround rectifies it.
 */
static __init void check_daddiu(void)
{
	long v, w, tmp;

	printk("Checking for the daddiu bug... ");

	/*
	 * The following code leads to a wrong result of daddiu when
	 * executed on R4400 rev. 1.0 (PRId 00000440).
	 *
	 * See "MIPS R4400PC/SC Errata, Processor Revision 1.0" by
	 * MIPS Technologies, Inc., erratum #7 for details.
	 *
	 * According to "MIPS R4000PC/SC Errata, Processor Revision
	 * 2.2 and 3.0" by MIPS Technologies, Inc., erratum #41 this
	 * problem affects R4000 rev. 2.2 and 3.0 (PRId 00000422 and
	 * 00000430, respectively), too.  Testing failed to trigger it
	 * so far.
	 *
	 * I got no permission to duplicate the errata here, sigh...
	 * --macro
	 */
	asm volatile(
		".set	push\n\t"
		".set	noat\n\t"
		".set	noreorder\n\t"
		".set	nomacro\n\t"
		"addiu	%2, $0, %3\n\t"
		"dsrl	%2, %2, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
		".set	daddi\n\t"
#endif
		"daddiu %0, %2, %4\n\t"
		"addiu	%1, $0, %4\n\t"
		"daddu	%1, %2\n\t"
		".set	pop"
		: "=&r" (v), "=&r" (w), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));

	daddiu_bug = v != w;

	if (!daddiu_bug) {
		pr_cont("no.\n");
		return;
	}

	pr_cont("yes, workaround... ");

	/* Same computation via the unrestricted (macro) daddiu. */
	asm volatile(
		"addiu	%2, $0, %3\n\t"
		"dsrl	%2, %2, 1\n\t"
		"daddiu %0, %2, %4\n\t"
		"addiu	%1, $0, %4\n\t"
		"daddu	%1, %2"
		: "=&r" (v), "=&r" (w), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));

	if (v == w) {
		pr_cont("yes.\n");
		return;
	}

	pr_cont("no.\n");
	panic(bug64hit,
	      IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) ? nowar : daddiwar);
}
316 
/*
 * Erratum checks that can run early in boot.  check_daddi() is
 * deferred to check_bugs64() — presumably because it swaps the
 * overflow exception vector via set_except_vector(), which needs the
 * trap infrastructure set up first; verify against the callers.
 */
void __init check_bugs64_early(void)
{
	check_mult_sh();
	check_daddiu();
}
322 
/* Late erratum check: needs a working overflow exception handler. */
void __init check_bugs64(void)
{
	check_daddi();
}
327