/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#include <asm/irqflags.h>
#include <asm/hazards.h>
#include <linux/compiler.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/stringify.h>

#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * The R4000/R4400 need three nops, the R4600 two, and the R10000 none
 * at all.
 */
/*
 * For the TX49, operating on the IE bit alone is not enough.
 *
 * If an mfc0 $12 follows a store and that mfc0 is the last instruction
 * of a page, and fetching the next instruction causes a TLB miss, the
 * result of the mfc0 may wrongly have the EXL bit set.
 *
 * Errata: ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
 *
 * Workaround: mask the EXL bit out of the result, or place a nop
 * before the mfc0.
 */
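/*
 * Illustrative sketch only, not code from this file: masking EXL
 * (Status bit 1, 0x2) out of the value read back would look like
 *
 *	mfc0	$1, $12
 *	ori	$1, 0x2
 *	xori	$1, 0x2		# clear a possibly spurious EXL bit
 *
 * and the alternative of separating the store from the mfc0 like
 *
 *	nop
 *	mfc0	$1, $12
 *
 * The sequences below are safe either way because their ori/xori
 * masks already cover bit 1, so a spurious EXL never reaches the
 * mtc0 write-back.
 */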
notrace void arch_local_irq_disable(void)
{
	preempt_disable();

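	/*
	 * In the pre-R2 sequence below, the ori/xori pair clears the
	 * five low Status bits (including IE) using immediates only;
	 * a plain andi cannot clear bits, as its 16-bit immediate is
	 * zero-extended.
	 */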
	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	noat						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1					\n"
	"	ori	$1, 0x400					\n"
	"	.set	noreorder					\n"
	"	mtc0	$1, $2, 1					\n"
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	"	mfc0	$1, $12						\n"
	"	ori	$1, 0x1f					\n"
	"	xori	$1, 0x1f					\n"
	"	.set	noreorder					\n"
	"	mtc0	$1, $12						\n"
#endif
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");

	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_disable);


notrace unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	preempt_disable();

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	%[flags], $2, 1					\n"
	"	ori	$1, %[flags], 0x400				\n"
	"	.set	noreorder					\n"
	"	mtc0	$1, $2, 1					\n"
	"	andi	%[flags], %[flags], 0x400			\n"
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	"	mfc0	%[flags], $12					\n"
	"	ori	$1, %[flags], 0x1f				\n"
	"	xori	$1, 0x1f					\n"
	"	.set	noreorder					\n"
	"	mtc0	$1, $12						\n"
#endif
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: [flags] "=r" (flags)
	: /* no inputs */
	: "memory");

	preempt_enable();

	return flags;
}
EXPORT_SYMBOL(arch_local_irq_save);
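
/*
 * Illustrative usage sketch, not part of the original file; the
 * function name is hypothetical.  Callers normally reach the two
 * entry points through the local_irq_save()/local_irq_restore()
 * wrappers and always pair them like this:
 */
static inline void example_irq_off_section(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* mask IRQs, keep the old state */
	/* ... critical section: interrupts are off on this CPU ... */
	arch_local_irq_restore(flags);	/* put the saved IE state back */
}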

notrace void arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

#ifdef CONFIG_MIPS_MT_SMTC
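	/*
	 * On SMTC the per-TC interrupt-exempt bit, TCStatus.IXMT
	 * (0x400), plays the role that Status.IE plays elsewhere.
	 */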
	/*
	 * The SMTC kernel needs to do a software replay of queued IPIs,
	 * at the cost of branch and call overhead on each
	 * local_irq_restore().
	 */
	if (unlikely(!(flags & 0x0400)))
		smtc_ipi_replay();
#endif
	preempt_disable();

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	noreorder					\n"
	"	.set	noat						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1					\n"
	"	andi	%[flags], 0x400					\n"
	"	ori	$1, 0x400					\n"
	"	xori	$1, 0x400					\n"
	"	or	%[flags], $1					\n"
	"	mtc0	%[flags], $2, 1					\n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
	/* see irqflags.h for inline function */
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	"	mfc0	$1, $12						\n"
	"	andi	%[flags], 1					\n"
	"	ori	$1, 0x1f					\n"
	"	xori	$1, 0x1f					\n"
	"	or	%[flags], $1					\n"
	"	mtc0	%[flags], $12					\n"
#endif
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: [flags] "=r" (__tmp1)
	: "0" (flags)
	: "memory");

	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);


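/*
 * Identical to arch_local_irq_restore() above, except that the SMTC
 * software IPI replay check is skipped.
 */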
notrace void __arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	preempt_disable();

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	noreorder					\n"
	"	.set	noat						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1					\n"
	"	andi	%[flags], 0x400					\n"
	"	ori	$1, 0x400					\n"
	"	xori	$1, 0x400					\n"
	"	or	%[flags], $1					\n"
	"	mtc0	%[flags], $2, 1					\n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
	/* see irqflags.h for inline function */
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	"	mfc0	$1, $12						\n"
	"	andi	%[flags], 1					\n"
	"	ori	$1, 0x1f					\n"
	"	xori	$1, 0x1f					\n"
	"	or	%[flags], $1					\n"
	"	mtc0	%[flags], $12					\n"
#endif
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: [flags] "=r" (__tmp1)
	: "0" (flags)
	: "memory");

	preempt_enable();
}
EXPORT_SYMBOL(__arch_local_irq_restore);

#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */