/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/asm-eva.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
#include <asm/unroll.h>

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it via a type cast.
 */
#define INDEX_BASE	CKSEG0

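/*
 * Illustrative sketch (not part of the API): on a 64-bit kernel the
 * cast sign-extends CKSEG0 to 0xffffffff80000000, so for example
 *
 *	unsigned long ws = 1UL << current_cpu_data.dcache.waybit;
 *	flush_dcache_line_indexed(INDEX_BASE | ws);
 *
 * hits way 1, set 0 of the D-cache regardless of how an implementation
 * derives index bits from virtual addresses.
 */
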
#define _cache_op(insn, op, addr)					\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
	"	" insn("%0", "%1") "				\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))

#define cache_op(op, addr)						\
	_cache_op(kernel_cache, op, addr)

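/*
 * Illustrative expansion, assuming a non-EVA kernel where kernel_cache
 * (from <asm/asm-eva.h>) emits the plain "cache" opcode:
 * cache_op(Hit_Writeback_Inv_D, addr) boils down to roughly
 *
 *	cache	Hit_Writeback_Inv_D, 0(addr)
 *
 * The "R" constraint passes the byte at addr as a memory input, which
 * both supplies an addressable operand and keeps the compiler from
 * moving earlier stores to that location past the op.
 */
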
static inline void flush_icache_line_indexed(unsigned long addr)
{
	cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2EF:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
}

static inline void flush_dcache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_D, addr);
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#ifdef CONFIG_EVA

#define protected_cache_op(op, addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR_WD)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})
#else

#define protected_cache_op(op, addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
	"1:	cache	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR_WD)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})
#endif

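/*
 * Sketch of the fixup flow above (mechanism as written; the caller is
 * invented for illustration): if the cache/cachee op at label 1 takes
 * an address error or TLB fault, the __ex_table entry points the
 * exception handler at label 3, which loads -EFAULT into __err and
 * jumps back to label 2.  A caller can therefore poke at a user
 * address without risking an oops:
 *
 *	if (protected_cache_op(Hit_Invalidate_I, user_addr))
 *		return -EFAULT;
 */
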
/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline int protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2EF:
		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);

	default:
		return protected_cache_op(Hit_Invalidate_I, addr);
	}
}

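/*
 * Hedged usage sketch for the trampoline case mentioned above: after
 * writing instructions to user memory, push the data out of the
 * D-cache and discard the stale I-cache line; dc_lsize and ic_lsize
 * stand in for the probed cache line sizes:
 *
 *	err |= protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
 *	err |= protected_flush_icache_line(addr & ~(ic_lsize - 1));
 */
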
/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop, so we use Hit_Writeback_Inv_D, which is supported by all R4000-style
 * caches.  Only a single cache line gets unnecessarily invalidated here, so
 * the penalty isn't too severe.
 */
static inline int protected_writeback_dcache_line(unsigned long addr)
{
	return protected_cache_op(Hit_Writeback_Inv_D, addr);
}

static inline int protected_writeback_scache_line(unsigned long addr)
{
	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#define cache_unroll(times, insn, op, addr, lsize) do {			\
	int i = 0;							\
	unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize)));	\
} while (0)

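/*
 * Rough compile-time picture: each repetition re-evaluates i++, so
 * cache_unroll(4, kernel_cache, Hit_Writeback_Inv_D, addr, 32) emits
 * four back-to-back ops covering consecutive 32-byte lines:
 *
 *	cache	Hit_Writeback_Inv_D, addr + 0x00
 *	cache	Hit_Writeback_Inv_D, addr + 0x20
 *	cache	Hit_Writeback_Inv_D, addr + 0x40
 *	cache	Hit_Writeback_Inv_D, addr + 0x60
 */
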
/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	do {								\
		cache_unroll(32, kernel_cache, hitop, start, lsize);	\
		start += lsize * 32;					\
	} while (start < end);						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}

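/*
 * Each instantiation below expands to three helpers; for example
 * __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D,
 * Hit_Writeback_Inv_D, 32, ) yields
 *
 *	blast_dcache32()		  - index-op every way/set of the D-cache
 *	blast_dcache32_page(page)	  - hit-op one page by virtual address
 *	blast_dcache32_page_indexed(page) - index-op one page's worth of sets
 *					    in every way
 */
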
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	do {								\
		cache_unroll(32, user_cache, hitop, start, lsize);	\
		start += lsize * 32;					\
	} while (start < end);						\
}

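/*
 * Note on user_cache: it comes from <asm/asm-eva.h> and, under
 * CONFIG_EVA, emits the "cachee" opcode so the hit ops act on the user
 * address space; without EVA it degrades to the ordinary "cache"
 * opcode, making the _user_page helpers identical to their kernel
 * counterparts.
 */
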
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
}

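/*
 * Worked example of the rounding above: blast_dcache_range(0x1005,
 * 0x1043) with a 32-byte line size rounds addr down to 0x1000 and aend
 * (the line holding end - 1) down to 0x1040, so exactly three hit ops
 * are issued - at 0x1000, 0x1020 and 0x1040 - before the loop breaks.
 */
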
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

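/*
 * Hedged usage sketch (the real call sites live in the r4k cache and
 * DMA maintenance code): the writeback variants such as
 * blast_dcache_range() fit flushing a buffer out to memory before a
 * device reads it, while blast_inv_dcache_range() discards stale lines
 * before the CPU reads data a device has just written.
 */
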
/* Currently, this is very specific to Loongson-3 */
#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)	\
static inline void blast_##pfx##cache##lsize##_node(long node)		\
{									\
	unsigned long start = CAC_BASE | nid_to_addrbase(node);		\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}

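/*
 * Each instantiation below yields e.g. blast_scache32_node(node), which
 * index-ops one NUMA node's S-cache by folding the node's address base
 * (via nid_to_addrbase()) into the CAC_BASE window instead of using
 * INDEX_BASE.
 */
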
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

#endif /* _ASM_R4KCACHE_H */