/* arch/mips/mm/c-r4k.c (revision c0e297dc61f8d4453e07afbea1fa8d0e67cd4a34) */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>
#include <asm/mips-cm.h>

/*
 * Special variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(void (*func)(void *info), void *info)
{
	preempt_disable();

	/*
	 * The Coherent Manager propagates address-based cache ops to other
	 * cores but not index-based ops.  However, r4k_on_each_cpu is used
	 * in both cases, so there is no easy way to tell what kind of op
	 * is being sent to the other cores.  The best we can probably do
	 * is to issue the IPI only when no CM is present, because both
	 * CM-based SMP protocols (CMP & CPS) restrict index-based cache
	 * ops anyway.
	 */
	if (!mips_cm_present())
		smp_call_function_many(&cpu_foreign_map, func, info, 1);
	func(info);
	preempt_enable();
}
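
/*
 * Typical usage, as seen throughout this file: bundle any arguments in a
 * small on-stack struct and hand a local_* worker to r4k_on_each_cpu(),
 * e.g. (a sketch of the pattern used by r4k_flush_icache_range() below):
 *
 *	struct flush_icache_range_args args = { .start = start, .end = end };
 *	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
 */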

#if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif
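
/*
 * Index-based cache ops act only on the local core's caches, so on CMP/CPS
 * systems a line may survive in another core's cache.  Hence index ops are
 * only considered "safe" (usable as a cheap substitute for address-based
 * flushes of large ranges) when no such SMP protocol is in use.
 */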

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
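
/*
 * The workaround above follows the R4600 errata: on V2.x a hit cache op
 * must be preceded by an uncached load (the CKSEG1 read above), while
 * V1.x wants a few pipeline-settling nops first.  See asm/war.h for the
 * gory details.
 */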

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
	blast_dcache64_page(addr);
}

static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
{
	blast_dcache128_page(addr);
}

static void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	switch (dc_lsize) {
	case 0:
		r4k_blast_dcache_page = (void *)cache_noop;
		break;
	case 16:
		r4k_blast_dcache_page = blast_dcache16_page;
		break;
	case 32:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
		break;
	case 64:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
		break;
	case 128:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
		break;
	default:
		break;
	}
}

#ifndef CONFIG_EVA
#define r4k_blast_dcache_user_page  r4k_blast_dcache_page
#else

static void (*r4k_blast_dcache_user_page)(unsigned long addr);

static void r4k_blast_dcache_user_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_user_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_user_page = blast_dcache16_user_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_user_page = blast_dcache32_user_page;
	else if (dc_lsize == 64)
		r4k_blast_dcache_user_page = blast_dcache64_user_page;
}

#endif

static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
	else if (dc_lsize == 128)
		r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
}

void (*r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);

static void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
	else if (dc_lsize == 64)
		r4k_blast_dcache = blast_dcache64;
	else if (dc_lsize == 128)
		r4k_blast_dcache = blast_dcache128;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
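
/*
 * Each cache32_unroll32() invocation touches 32 lines of 32 bytes, i.e.
 * one 1kB chunk per iteration.  Aligning the loop code itself to a 1kB
 * (or 2kB) boundary therefore guarantees that the instructions performing
 * the invalidation never live in the chunk currently being invalidated --
 * that is the trick behind the even/odd chunk dance in the TX49 routines
 * below.
 */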

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (*r4k_blast_icache_page)(unsigned long addr);

static void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
		r4k_blast_icache_page = loongson2_blast_icache32_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
	else if (ic_lsize == 128)
		r4k_blast_icache_page = blast_icache128_page;
}

#ifndef CONFIG_EVA
#define r4k_blast_icache_user_page  r4k_blast_icache_page
#else

static void (*r4k_blast_icache_user_page)(unsigned long addr);

static void r4k_blast_icache_user_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_user_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_user_page = blast_icache16_user_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_user_page = blast_icache32_user_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_user_page = blast_icache64_user_page;
}

#endif

static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else if (current_cpu_type() == CPU_LOONGSON2)
			r4k_blast_icache_page_indexed =
				loongson2_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

void (*r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);

static void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else if (current_cpu_type() == CPU_LOONGSON2)
			r4k_blast_icache = loongson2_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
	else if (ic_lsize == 128)
		r4k_blast_icache = blast_icache128;
}

static void (*r4k_blast_scache_page)(unsigned long addr);

static void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (*r4k_blast_scache)(void);

static void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void *args)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
	case CPU_LOONGSON3:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		/*
		 * These caches are inclusive caches, that is, if something
		 * is not cached in the S-cache, we know it also won't be
		 * in one of the primary caches.
		 */
		r4k_blast_scache();
		break;

	default:
		r4k_blast_dcache();
		r4k_blast_icache();
		break;
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
}

static inline int has_valid_asid(const struct mm_struct *mm)
{
#ifdef CONFIG_MIPS_MT_SMP
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}
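
/*
 * A nonzero cpu_context() means the mm has held an ASID on that CPU and
 * may therefore have lines in its caches; an mm that never ran anywhere
 * can be skipped entirely by the flush routines below.
 */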

static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void *args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}

static inline void local_r4k_flush_cache_mm(void *args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, while the R1x000 behaves sanely.
	 * Indexed S-cache ops on R4000SC and R4400SC also invalidate the
	 * primary caches, so we can bail out early after the S-cache blast.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm doesn't own a valid ASID yet, it cannot possibly have
	 * gotten this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapped(page) && !Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		vaddr ? r4k_blast_dcache_page(addr) :
			r4k_blast_dcache_user_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			vaddr ? r4k_blast_icache_page(addr) :
				r4k_blast_icache_user_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
}

static inline void local_r4k_flush_data_cache_page(void *addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
}
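
/*
 * When in atomic context it is unsafe to wait for the other cores to run
 * the flush (r4k_on_each_cpu() may end up in smp_call_function_many(),
 * which must not be used from atomic context), so in that case only the
 * local cache is flushed.
 */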

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else {
		switch (boot_cpu_type()) {
		case CPU_LOONGSON2:
			protected_loongson2_blast_icache_range(start, end);
			break;

		default:
			protected_blast_icache_range(start, end);
			break;
		}
	}
#ifdef CONFIG_EVA
	/*
	 * Due to all possible segment mappings, there might be cache aliases
	 * caused by the bootloader being in non-EVA mode and the CPU switching
	 * to EVA during early kernel init.  It's best to flush the scache
	 * to avoid secondary cores fetching stale data, which would lead to
	 * kernel crashes.
	 */
	bc_wback_inv(start, (end - start));
	__sync();
#endif
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	local_r4k_flush_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
	instruction_hazard();
}
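
/*
 * This is the workhorse behind flush_icache_range(); anything that
 * modifies kernel code at runtime (module loading, kprobes, ...) must
 * call it over the modified range before executing the new instructions.
 */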

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		preempt_enable();
		__sync();
		return;
	}

	/*
	 * Either there is no secondary cache, or the available caches don't
	 * have the subset property, so we have to flush the primary caches
	 * explicitly.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_wback_inv(addr, size);
	__sync();
}
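
/*
 * On DMA-noncoherent platforms the routine above backs both
 * _dma_cache_wback_inv and _dma_cache_wback (see the assignments in
 * r4k_cache_init() below), i.e. it is what the streaming DMA API ends up
 * calling when a buffer is handed to a device.
 */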

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else {
			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors, and
			 * some processors, among them the RM5200 and RM7000
			 * QED processors, will throw an address error for
			 * cache hit ops with insufficient alignment.  Solved
			 * by aligning the address to the cache line size.
			 */
			blast_inv_scache_range(addr, addr + size);
		}
		preempt_enable();
		__sync();
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_inv(addr, size);
	__sync();
}
#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */

/*
 * Since we're protected against bad userland addresses here, we don't
 * care very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void *arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set "MIPS_ISA_LEVEL"\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

struct flush_kernel_vmap_range_args {
	unsigned long	vaddr;
	int		size;
};

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
	struct flush_kernel_vmap_range_args *vmra = args;
	unsigned long vaddr = vmra->vaddr;
	int size = vmra->size;

	/*
	 * Aliases only affect the primary caches so don't bother with
	 * S-caches or T-caches.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size)
		r4k_blast_dcache();
	else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(vaddr, vaddr + size);
	}
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	struct flush_kernel_vmap_range_args args;

	args.vaddr = (unsigned long) vaddr;
	args.size = size;

	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}
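
/*
 * The loop above walks the 4kB of one icache way; at each index it zeroes
 * the tags in all four ways (the 0x1000 stride is the way size), does a
 * Fill to exercise the refill path, then zeroes the tags again, leaving
 * the post-reset icache in a consistent, fully invalid state.
 */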

static inline int alias_74k_erratum(struct cpuinfo_mips *c)
{
	unsigned int imp = c->processor_id & PRID_IMP_MASK;
	unsigned int rev = c->processor_id & PRID_REV_MASK;
	int present = 0;

	/*
	 * Early versions of the 74K do not update the cache tags on a
	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
	 * aliases.  In this case it is better to treat the cache as always
	 * having aliases.  Also disable the synonym tag update feature
	 * where available.  In this case no opportunistic tag update will
	 * happen where a load causes a virtual address miss but a physical
	 * address hit during a D-cache look-up.
	 */
	switch (imp) {
	case PRID_IMP_74K:
		if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
			present = 1;
		if (rev == PRID_REV_ENCODE_332(2, 4, 0))
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		break;
	case PRID_IMP_1074K:
		if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
			present = 1;
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		}
		break;
	default:
		BUG();
	}

	return present;
}

static void b5k_instruction_hazard(void)
{
	__sync();
	__sync();
	__asm__ __volatile__(
	"       nop; nop; nop; nop; nop; nop; nop; nop\n"
	"       nop; nop; nop; nop; nop; nop; nop; nop\n"
	"       nop; nop; nop; nop; nop; nop; nop; nop\n"
	"       nop; nop; nop; nop; nop; nop; nop; nop\n"
	: : : "memory");
}
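
/*
 * On BMIPS5000 the icache fills from the dcache, so draining the write
 * buffers (the two syncs) followed by enough nops to flush the pipeline
 * is all it takes for subsequent instruction fetches to see new code;
 * r4k_cache_init() below wires this in place of the usual flushes.
 */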

static char *way_string[] = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
	"9-way", "10-way", "11-way", "12-way",
	"13-way", "14-way", "15-way", "16-way",
};

static void probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	int has_74k_erratum = 0;
	unsigned long config1;
	unsigned int lsize;

	switch (current_cpu_type()) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
		/* fall through */
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	case CPU_LOONGSON3:
		config1 = read_c0_config1();
		lsize = (config1 >> 19) & 7;
		if (lsize)
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = 0;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		icache_size = c->icache.sets *
					  c->icache.ways *
					  c->icache.linesz;
		c->icache.waybit = 0;

		lsize = (config1 >> 10) & 7;
		if (lsize)
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = 0;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);
		dcache_size = c->dcache.sets *
					  c->dcache.ways *
					  c->dcache.linesz;
		c->dcache.waybit = 0;
		break;

	case CPU_CAVIUM_OCTEON3:
		/* For now lie about the number of ways. */
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 8;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 8;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU; let's probe
		 * the I-cache first ...
		 */
		config1 = read_c0_config1();

		lsize = (config1 >> 19) & 7;

		/* IL == 7 is reserved */
		if (lsize == 7)
			panic("Invalid icache line size");

		c->icache.linesz = lsize ? 2 << lsize : 0;

		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		lsize = (config1 >> 10) & 7;

		/* DL == 7 is reserved */
		if (lsize == 7)
			panic("Invalid dcache line size");

		c->dcache.linesz = lsize ? 2 << lsize : 0;

		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.	With page sizes larger than 32kB it is impossible to get a
	 * VCE exception anymore, so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping its hardware in the "bad"
	 * configuration.
	 */
	if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
	    (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
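
	/*
	 * Worked example: a 32kB, 4-way dcache with 32-byte lines gives a
	 * waysize of 8kB, 256 sets and waybit = __ffs(8192) = 13.  With
	 * 4kB pages that waysize exceeds PAGE_SIZE, which is exactly the
	 * virtual-alias condition checked below.
	 */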

	/*
	 * R1x000 P-caches are odd in a positive way.  They're 32kB 2-way
	 * virtually indexed, so they'd normally suffer from aliases, but
	 * magic in the hardware deals with that for us so we don't need
	 * to take care ourselves.
	 */
	switch (current_cpu_type()) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_XLR:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		break;

	case CPU_74K:
	case CPU_1074K:
		has_74k_erratum = alias_74k_erratum(c);
		/* Fall through. */
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_1004K:
	case CPU_INTERAPTIV:
	case CPU_P5600:
	case CPU_PROAPTIV:
	case CPU_M5150:
	case CPU_QEMU_GENERIC:
		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
		    (c->icache.waysize > PAGE_SIZE))
			c->icache.flags |= MIPS_CACHE_ALIASES;
		if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) {
			/*
			 * Effectively physically indexed dcache,
			 * thus no virtual aliases.
			 */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (current_cpu_type()) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;

	case CPU_LOONGSON2:
		/*
		 * LOONGSON2 has a 4 way icache, but when using indexed
		 * cache ops, one op will act on all 4 ways.
		 */
		c->icache.ways = 1;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;		/* does not matter */

	return 1;
}
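
/*
 * How the probe works: the loads above leave valid tags in the lines at
 * begin, begin+64k, begin+128k, ...; index 0 is then forcibly given a
 * zero (invalid) tag.  The search walks the power-of-two offsets until
 * Index_Load_Tag_SD returns the zero tag, i.e. until the offset wraps
 * around to index 0 -- that offset is the S-cache size.
 */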

static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

static void __init loongson3_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config2, lsize;

	config2 = read_c0_config2();
	lsize = (config2 >> 4) & 15;
	if (lsize)
		c->scache.linesz = 2 << lsize;
	else
		c->scache.linesz = 0;
	c->scache.sets = 64 << ((config2 >> 8) & 15);
	c->scache.ways = 1 + (config2 & 15);

	scache_size = c->scache.sets *
				  c->scache.ways *
				  c->scache.linesz;
	/* Loongson-3 has 4 cores, 1MB scache for each; the scaches are shared */
	scache_size *= 4;
	c->scache.waybit = 0;
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
	if (scache_size)
		c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have an S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;

	case CPU_LOONGSON3:
		loongson3_sc_init();
		return;

	case CPU_CAVIUM_OCTEON3:
	case CPU_XLP:
		/* don't need to worry about L2, fully coherent */
		return;

	default:
		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
				    MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
				    MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write-only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables bus transaction
	 * overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set
	 * it just in case on those revisions that the (now gone) cpu table
	 * required it for.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	 __asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}

static int cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 0;
}

early_param("cca", cca_setup);
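
/*
 * The "cca" kernel parameter overrides the cache coherency attribute used
 * for kernel pages, e.g. booting with "cca=3" (cacheable, noncoherent on
 * most MIPS32/MIPS64 cores) or "cca=2" (uncached) -- handy when debugging
 * suspected cache or coherency problems.
 */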

static void coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, while others, like Toshiba, had
	 * the silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}

static void r4k_cache_error_setup(void)
{
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;

	switch (current_cpu_type()) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}
}

void r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	struct cpuinfo_mips *c = &current_cpu_data;

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();
#ifdef CONFIG_EVA
	r4k_blast_dcache_user_page_setup();
	r4k_blast_icache_user_page_setup();
#endif

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;
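
	/*
	 * Example: 256 sets of 32-byte lines span 8kB per way, so with 4kB
	 * pages shm_align_mask becomes 0x1fff and user mappings of shared
	 * files are laid out 8kB-colour aligned to avoid dcache aliases.
	 */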

	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range	= local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
	if (coherentio) {
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else {
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif

	build_clear_page();
	build_copy_page();

	/*
	 * We want to run CMP kernels on cores with and without coherent
	 * caches.  Therefore, do not use CONFIG_MIPS_CMP to decide whether
	 * or not to flush caches.
	 */
	local_r4k___flush_cache_all(NULL);

	coherency_setup();
	board_cache_error_setup = r4k_cache_error_setup;

	/*
	 * Per-CPU overrides
	 */
	switch (current_cpu_type()) {
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
		/* No IPI is needed because all CPUs share the same D$ */
		flush_data_cache_page = r4k_blast_dcache_page;
		break;
	case CPU_BMIPS5000:
		/* We lose our superpowers if L2 is disabled */
		if (c->scache.flags & MIPS_CACHE_NOT_PRESENT)
			break;

		/* I$ fills from D$ just by emptying the write buffers */
		flush_cache_page = (void *)b5k_instruction_hazard;
		flush_cache_range = (void *)b5k_instruction_hazard;
		flush_cache_sigtramp = (void *)b5k_instruction_hazard;
		local_flush_data_cache_page = (void *)b5k_instruction_hazard;
		flush_data_cache_page = (void *)b5k_instruction_hazard;
		flush_icache_range = (void *)b5k_instruction_hazard;
		local_flush_icache_range = (void *)b5k_instruction_hazard;

		/* Cache aliases are handled in hardware; allow HIGHMEM */
		current_cpu_data.dcache.flags &= ~MIPS_CACHE_ALIASES;

		/* Optimization: an L2 flush implicitly flushes the L1 */
		current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
		break;
	}
}
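
/*
 * CPU power management can lose c0_config (and with it the CCA set up
 * above), so re-run coherency_setup() whenever a core comes back from a
 * PM state -- that is all the notifier below does.
 */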

static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
				 void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		coherency_setup();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_cache_pm_notifier_block = {
	.notifier_call = r4k_cache_pm_notifier,
};

int __init r4k_cache_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
}
arch_initcall(r4k_cache_init_pm);