xref: /linux/arch/mips/mm/c-r4k.c (revision 2b8232ce512105e28453f301d1510de8363bccd1)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
7  * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
8  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9  */
10 #include <linux/init.h>
11 #include <linux/highmem.h>
12 #include <linux/kernel.h>
13 #include <linux/linkage.h>
14 #include <linux/sched.h>
15 #include <linux/mm.h>
16 #include <linux/bitops.h>
17 
18 #include <asm/bcache.h>
19 #include <asm/bootinfo.h>
20 #include <asm/cache.h>
21 #include <asm/cacheops.h>
22 #include <asm/cpu.h>
23 #include <asm/cpu-features.h>
24 #include <asm/io.h>
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <asm/r4kcache.h>
28 #include <asm/sections.h>
29 #include <asm/system.h>
30 #include <asm/mmu_context.h>
31 #include <asm/war.h>
32 #include <asm/cacheflush.h> /* for run_uncached() */
33 
34 
35 /*
36  * Special variant of smp_call_function for use by cache functions:
37  *
38  *  o No return value
39  *  o collapses to normal function call on UP kernels
40  *  o collapses to normal function call on systems with a single shared
41  *    primary cache.
42  */
43 static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
44                                    int retry, int wait)
45 {
46 	preempt_disable();
47 
48 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
49 	smp_call_function(func, info, retry, wait);
50 #endif
51 	func(info);
52 	preempt_enable();
53 }
54 
55 /*
56  * Must die.
57  */
58 static unsigned long icache_size __read_mostly;
59 static unsigned long dcache_size __read_mostly;
60 static unsigned long scache_size __read_mostly;
61 
62 /*
63  * Dummy cache handling routines for machines without board caches
64  */
65 static void cache_noop(void) {}
66 
67 static struct bcache_ops no_sc_ops = {
68 	.bc_enable = (void *)cache_noop,
69 	.bc_disable = (void *)cache_noop,
70 	.bc_wback_inv = (void *)cache_noop,
71 	.bc_inv = (void *)cache_noop
72 };
73 
74 struct bcache_ops *bcops = &no_sc_ops;
75 
76 #define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
77 #define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
78 
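/*
 * Workaround for the R4600 Hit cacheop errata: v2.x cores get a dummy
 * uncached (CKSEG1) load and v1.x cores a few nops before a Hit-type
 * cache operation.
 */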
79 #define R4600_HIT_CACHEOP_WAR_IMPL					\
80 do {									\
81 	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
82 		*(volatile unsigned long *)CKSEG1;			\
83 	if (R4600_V1_HIT_CACHEOP_WAR)					\
84 		__asm__ __volatile__("nop;nop;nop;nop");		\
85 } while (0)
86 
87 static void (*r4k_blast_dcache_page)(unsigned long addr);
88 
89 static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
90 {
91 	R4600_HIT_CACHEOP_WAR_IMPL;
92 	blast_dcache32_page(addr);
93 }
94 
95 static void __init r4k_blast_dcache_page_setup(void)
96 {
97 	unsigned long  dc_lsize = cpu_dcache_line_size();
98 
99 	if (dc_lsize == 0)
100 		r4k_blast_dcache_page = (void *)cache_noop;
101 	else if (dc_lsize == 16)
102 		r4k_blast_dcache_page = blast_dcache16_page;
103 	else if (dc_lsize == 32)
104 		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
105 }
106 
107 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
108 
109 static void __init r4k_blast_dcache_page_indexed_setup(void)
110 {
111 	unsigned long dc_lsize = cpu_dcache_line_size();
112 
113 	if (dc_lsize == 0)
114 		r4k_blast_dcache_page_indexed = (void *)cache_noop;
115 	else if (dc_lsize == 16)
116 		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
117 	else if (dc_lsize == 32)
118 		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
119 }
120 
121 static void (* r4k_blast_dcache)(void);
122 
123 static void __init r4k_blast_dcache_setup(void)
124 {
125 	unsigned long dc_lsize = cpu_dcache_line_size();
126 
127 	if (dc_lsize == 0)
128 		r4k_blast_dcache = (void *)cache_noop;
129 	else if (dc_lsize == 16)
130 		r4k_blast_dcache = blast_dcache16;
131 	else if (dc_lsize == 32)
132 		r4k_blast_dcache = blast_dcache32;
133 }
134 
135 /* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
136 #define JUMP_TO_ALIGN(order) \
137 	__asm__ __volatile__( \
138 		"b\t1f\n\t" \
139 		".align\t" #order "\n\t" \
140 		"1:\n\t" \
141 		)
142 #define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
143 #define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
144 
145 static inline void blast_r4600_v1_icache32(void)
146 {
147 	unsigned long flags;
148 
149 	local_irq_save(flags);
150 	blast_icache32();
151 	local_irq_restore(flags);
152 }
153 
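/*
 * TX49 workaround: Index_Invalidate_I must not hit the I-cache lines the
 * code is currently executing from, so the loops below are aligned into
 * known 1kB chunks and each pass only blasts chunks of the opposite parity.
 */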
154 static inline void tx49_blast_icache32(void)
155 {
156 	unsigned long start = INDEX_BASE;
157 	unsigned long end = start + current_cpu_data.icache.waysize;
158 	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
159 	unsigned long ws_end = current_cpu_data.icache.ways <<
160 	                       current_cpu_data.icache.waybit;
161 	unsigned long ws, addr;
162 
163 	CACHE32_UNROLL32_ALIGN2;
164 	/* I'm in even chunk.  blast odd chunks */
165 	for (ws = 0; ws < ws_end; ws += ws_inc)
166 		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
167 			cache32_unroll32(addr|ws, Index_Invalidate_I);
168 	CACHE32_UNROLL32_ALIGN;
169 	/* I'm in odd chunk.  blast even chunks */
170 	for (ws = 0; ws < ws_end; ws += ws_inc)
171 		for (addr = start; addr < end; addr += 0x400 * 2)
172 			cache32_unroll32(addr|ws, Index_Invalidate_I);
173 }
174 
175 static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
176 {
177 	unsigned long flags;
178 
179 	local_irq_save(flags);
180 	blast_icache32_page_indexed(page);
181 	local_irq_restore(flags);
182 }
183 
184 static inline void tx49_blast_icache32_page_indexed(unsigned long page)
185 {
186 	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
187 	unsigned long start = INDEX_BASE + (page & indexmask);
188 	unsigned long end = start + PAGE_SIZE;
189 	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
190 	unsigned long ws_end = current_cpu_data.icache.ways <<
191 	                       current_cpu_data.icache.waybit;
192 	unsigned long ws, addr;
193 
194 	CACHE32_UNROLL32_ALIGN2;
195 	/* I'm in even chunk.  blast odd chunks */
196 	for (ws = 0; ws < ws_end; ws += ws_inc)
197 		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
198 			cache32_unroll32(addr|ws, Index_Invalidate_I);
199 	CACHE32_UNROLL32_ALIGN;
200 	/* I'm in odd chunk.  blast even chunks */
201 	for (ws = 0; ws < ws_end; ws += ws_inc)
202 		for (addr = start; addr < end; addr += 0x400 * 2)
203 			cache32_unroll32(addr|ws, Index_Invalidate_I);
204 }
205 
206 static void (* r4k_blast_icache_page)(unsigned long addr);
207 
208 static void __init r4k_blast_icache_page_setup(void)
209 {
210 	unsigned long ic_lsize = cpu_icache_line_size();
211 
212 	if (ic_lsize == 0)
213 		r4k_blast_icache_page = (void *)cache_noop;
214 	else if (ic_lsize == 16)
215 		r4k_blast_icache_page = blast_icache16_page;
216 	else if (ic_lsize == 32)
217 		r4k_blast_icache_page = blast_icache32_page;
218 	else if (ic_lsize == 64)
219 		r4k_blast_icache_page = blast_icache64_page;
220 }
221 
222 
223 static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
224 
225 static void __init r4k_blast_icache_page_indexed_setup(void)
226 {
227 	unsigned long ic_lsize = cpu_icache_line_size();
228 
229 	if (ic_lsize == 0)
230 		r4k_blast_icache_page_indexed = (void *)cache_noop;
231 	else if (ic_lsize == 16)
232 		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
233 	else if (ic_lsize == 32) {
234 		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
235 			r4k_blast_icache_page_indexed =
236 				blast_icache32_r4600_v1_page_indexed;
237 		else if (TX49XX_ICACHE_INDEX_INV_WAR)
238 			r4k_blast_icache_page_indexed =
239 				tx49_blast_icache32_page_indexed;
240 		else
241 			r4k_blast_icache_page_indexed =
242 				blast_icache32_page_indexed;
243 	} else if (ic_lsize == 64)
244 		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
245 }
246 
247 static void (* r4k_blast_icache)(void);
248 
249 static void __init r4k_blast_icache_setup(void)
250 {
251 	unsigned long ic_lsize = cpu_icache_line_size();
252 
253 	if (ic_lsize == 0)
254 		r4k_blast_icache = (void *)cache_noop;
255 	else if (ic_lsize == 16)
256 		r4k_blast_icache = blast_icache16;
257 	else if (ic_lsize == 32) {
258 		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
259 			r4k_blast_icache = blast_r4600_v1_icache32;
260 		else if (TX49XX_ICACHE_INDEX_INV_WAR)
261 			r4k_blast_icache = tx49_blast_icache32;
262 		else
263 			r4k_blast_icache = blast_icache32;
264 	} else if (ic_lsize == 64)
265 		r4k_blast_icache = blast_icache64;
266 }
267 
268 static void (* r4k_blast_scache_page)(unsigned long addr);
269 
270 static void __init r4k_blast_scache_page_setup(void)
271 {
272 	unsigned long sc_lsize = cpu_scache_line_size();
273 
274 	if (scache_size == 0)
275 		r4k_blast_scache_page = (void *)cache_noop;
276 	else if (sc_lsize == 16)
277 		r4k_blast_scache_page = blast_scache16_page;
278 	else if (sc_lsize == 32)
279 		r4k_blast_scache_page = blast_scache32_page;
280 	else if (sc_lsize == 64)
281 		r4k_blast_scache_page = blast_scache64_page;
282 	else if (sc_lsize == 128)
283 		r4k_blast_scache_page = blast_scache128_page;
284 }
285 
286 static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
287 
288 static void __init r4k_blast_scache_page_indexed_setup(void)
289 {
290 	unsigned long sc_lsize = cpu_scache_line_size();
291 
292 	if (scache_size == 0)
293 		r4k_blast_scache_page_indexed = (void *)cache_noop;
294 	else if (sc_lsize == 16)
295 		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
296 	else if (sc_lsize == 32)
297 		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
298 	else if (sc_lsize == 64)
299 		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
300 	else if (sc_lsize == 128)
301 		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
302 }
303 
304 static void (* r4k_blast_scache)(void);
305 
306 static void __init r4k_blast_scache_setup(void)
307 {
308 	unsigned long sc_lsize = cpu_scache_line_size();
309 
310 	if (scache_size == 0)
311 		r4k_blast_scache = (void *)cache_noop;
312 	else if (sc_lsize == 16)
313 		r4k_blast_scache = blast_scache16;
314 	else if (sc_lsize == 32)
315 		r4k_blast_scache = blast_scache32;
316 	else if (sc_lsize == 64)
317 		r4k_blast_scache = blast_scache64;
318 	else if (sc_lsize == 128)
319 		r4k_blast_scache = blast_scache128;
320 }
321 
322 static inline void local_r4k___flush_cache_all(void * args)
323 {
324 #if defined(CONFIG_CPU_LOONGSON2)
325 	r4k_blast_scache();
326 	return;
327 #endif
328 	r4k_blast_dcache();
329 	r4k_blast_icache();
330 
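	/* These processors also have an S-cache that has to be blasted for a
	   full flush. */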
331 	switch (current_cpu_type()) {
332 	case CPU_R4000SC:
333 	case CPU_R4000MC:
334 	case CPU_R4400SC:
335 	case CPU_R4400MC:
336 	case CPU_R10000:
337 	case CPU_R12000:
338 	case CPU_R14000:
339 		r4k_blast_scache();
340 	}
341 }
342 
343 static void r4k___flush_cache_all(void)
344 {
345 	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
346 }
347 
348 static inline void local_r4k_flush_cache_range(void * args)
349 {
350 	struct vm_area_struct *vma = args;
351 
352 	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
353 		return;
354 
355 	r4k_blast_dcache();
356 }
357 
358 static void r4k_flush_cache_range(struct vm_area_struct *vma,
359 	unsigned long start, unsigned long end)
360 {
361 	if (!cpu_has_dc_aliases)
362 		return;
363 
364 	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
365 }
366 
367 static inline void local_r4k_flush_cache_mm(void * args)
368 {
369 	struct mm_struct *mm = args;
370 
371 	if (!cpu_context(smp_processor_id(), mm))
372 		return;
373 
374 	/*
375 	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
376 	 * only flush the primary caches but R10000 and R12000 behave sanely ...
377 	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
378 	 * caches, so we can bail out early.
379 	 */
380 	if (current_cpu_type() == CPU_R4000SC ||
381 	    current_cpu_type() == CPU_R4000MC ||
382 	    current_cpu_type() == CPU_R4400SC ||
383 	    current_cpu_type() == CPU_R4400MC) {
384 		r4k_blast_scache();
385 		return;
386 	}
387 
388 	r4k_blast_dcache();
389 }
390 
391 static void r4k_flush_cache_mm(struct mm_struct *mm)
392 {
393 	if (!cpu_has_dc_aliases)
394 		return;
395 
396 	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
397 }
398 
399 struct flush_cache_page_args {
400 	struct vm_area_struct *vma;
401 	unsigned long addr;
402 	unsigned long pfn;
403 };
404 
405 static inline void local_r4k_flush_cache_page(void *args)
406 {
407 	struct flush_cache_page_args *fcp_args = args;
408 	struct vm_area_struct *vma = fcp_args->vma;
409 	unsigned long addr = fcp_args->addr;
410 	struct page *page = pfn_to_page(fcp_args->pfn);
411 	int exec = vma->vm_flags & VM_EXEC;
412 	struct mm_struct *mm = vma->vm_mm;
413 	pgd_t *pgdp;
414 	pud_t *pudp;
415 	pmd_t *pmdp;
416 	pte_t *ptep;
417 	void *vaddr;
418 
419 	/*
420 	 * If it owns no valid ASID yet, it cannot possibly have gotten
421 	 * this page into the cache.
422 	 */
423 	if (cpu_context(smp_processor_id(), mm) == 0)
424 		return;
425 
426 	addr &= PAGE_MASK;
427 	pgdp = pgd_offset(mm, addr);
428 	pudp = pud_offset(pgdp, addr);
429 	pmdp = pmd_offset(pudp, addr);
430 	ptep = pte_offset(pmdp, addr);
431 
432 	/*
433 	 * If the page isn't marked valid, the page cannot possibly be
434 	 * in the cache.
435 	 */
436 	if (!(pte_val(*ptep) & _PAGE_PRESENT))
437 		return;
438 
439 	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
440 		vaddr = NULL;
441 	else {
442 		/*
443 		 * Use kmap_coherent or kmap_atomic to do flushes for
444 		 * an ASID other than the current one.
445 		 */
446 		if (cpu_has_dc_aliases)
447 			vaddr = kmap_coherent(page, addr);
448 		else
449 			vaddr = kmap_atomic(page, KM_USER0);
450 		addr = (unsigned long)vaddr;
451 	}
452 
453 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
454 		r4k_blast_dcache_page(addr);
455 		if (exec && !cpu_icache_snoops_remote_store)
456 			r4k_blast_scache_page(addr);
457 	}
458 	if (exec) {
459 		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
460 			int cpu = smp_processor_id();
461 
462 			if (cpu_context(cpu, mm) != 0)
463 				drop_mmu_context(mm, cpu);
464 		} else
465 			r4k_blast_icache_page(addr);
466 	}
467 
468 	if (vaddr) {
469 		if (cpu_has_dc_aliases)
470 			kunmap_coherent();
471 		else
472 			kunmap_atomic(vaddr, KM_USER0);
473 	}
474 }
475 
476 static void r4k_flush_cache_page(struct vm_area_struct *vma,
477 	unsigned long addr, unsigned long pfn)
478 {
479 	struct flush_cache_page_args args;
480 
481 	args.vma = vma;
482 	args.addr = addr;
483 	args.pfn = pfn;
484 
485 	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
486 }
487 
488 static inline void local_r4k_flush_data_cache_page(void * addr)
489 {
490 	r4k_blast_dcache_page((unsigned long) addr);
491 }
492 
493 static void r4k_flush_data_cache_page(unsigned long addr)
494 {
495 	r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
496 }
497 
498 struct flush_icache_range_args {
499 	unsigned long start;
500 	unsigned long end;
501 };
502 
503 static inline void local_r4k_flush_icache_range(void *args)
504 {
505 	struct flush_icache_range_args *fir_args = args;
506 	unsigned long start = fir_args->start;
507 	unsigned long end = fir_args->end;
508 
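		/*
		 * The I-cache does not fill from the D-cache here, so dirty
		 * data must be written back from the D-cache (and the S-cache
		 * when remote stores are not snooped) before the I-cache is
		 * invalidated.
		 */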
509 	if (!cpu_has_ic_fills_f_dc) {
510 		if (end - start >= dcache_size) {
511 			r4k_blast_dcache();
512 		} else {
513 			R4600_HIT_CACHEOP_WAR_IMPL;
514 			protected_blast_dcache_range(start, end);
515 		}
516 
517 		if (!cpu_icache_snoops_remote_store && scache_size) {
518 			if (end - start > scache_size)
519 				r4k_blast_scache();
520 			else
521 				protected_blast_scache_range(start, end);
522 		}
523 	}
524 
525 	if (end - start > icache_size)
526 		r4k_blast_icache();
527 	else
528 		protected_blast_icache_range(start, end);
529 }
530 
531 static void r4k_flush_icache_range(unsigned long start, unsigned long end)
532 {
533 	struct flush_icache_range_args args;
534 
535 	args.start = start;
536 	args.end = end;
537 
538 	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
539 	instruction_hazard();
540 }
541 
542 #ifdef CONFIG_DMA_NONCOHERENT
543 
544 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
545 {
546 	/* Catch bad driver code */
547 	BUG_ON(size == 0);
548 
549 	if (cpu_has_inclusive_pcaches) {
550 		if (size >= scache_size)
551 			r4k_blast_scache();
552 		else
553 			blast_scache_range(addr, addr + size);
554 		return;
555 	}
556 
557 	/*
558 	 * Either no secondary cache or the available caches don't have the
559 	 * subset property so we have to flush the primary caches
560 	 * explicitly
561 	 */
562 	if (size >= dcache_size) {
563 		r4k_blast_dcache();
564 	} else {
565 		R4600_HIT_CACHEOP_WAR_IMPL;
566 		blast_dcache_range(addr, addr + size);
567 	}
568 
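	/* Finally write back and invalidate any board-level (external) cache. */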
569 	bc_wback_inv(addr, size);
570 }
571 
572 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
573 {
574 	/* Catch bad driver code */
575 	BUG_ON(size == 0);
576 
577 	if (cpu_has_inclusive_pcaches) {
578 		if (size >= scache_size)
579 			r4k_blast_scache();
580 		else
581 			blast_scache_range(addr, addr + size);
582 		return;
583 	}
584 
585 	if (size >= dcache_size) {
586 		r4k_blast_dcache();
587 	} else {
588 		R4600_HIT_CACHEOP_WAR_IMPL;
589 		blast_dcache_range(addr, addr + size);
590 	}
591 
592 	bc_inv(addr, size);
593 }
594 #endif /* CONFIG_DMA_NONCOHERENT */
595 
596 /*
597  * While we're protected against bad userland addresses, we don't care
598  * very much about what happens in that case.  Usually a segmentation
599  * fault will dump the process later on anyway ...
600  */
601 static void local_r4k_flush_cache_sigtramp(void * arg)
602 {
603 	unsigned long ic_lsize = cpu_icache_line_size();
604 	unsigned long dc_lsize = cpu_dcache_line_size();
605 	unsigned long sc_lsize = cpu_scache_line_size();
606 	unsigned long addr = (unsigned long) arg;
607 
608 	R4600_HIT_CACHEOP_WAR_IMPL;
609 	if (dc_lsize)
610 		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
611 	if (!cpu_icache_snoops_remote_store && scache_size)
612 		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
613 	if (ic_lsize)
614 		protected_flush_icache_line(addr & ~(ic_lsize - 1));
615 	if (MIPS4K_ICACHE_REFILL_WAR) {
616 		__asm__ __volatile__ (
617 			".set push\n\t"
618 			".set noat\n\t"
619 			".set mips3\n\t"
620 #ifdef CONFIG_32BIT
621 			"la	$at,1f\n\t"
622 #endif
623 #ifdef CONFIG_64BIT
624 			"dla	$at,1f\n\t"
625 #endif
626 			"cache	%0,($at)\n\t"
627 			"nop; nop; nop\n"
628 			"1:\n\t"
629 			".set pop"
630 			:
631 			: "i" (Hit_Invalidate_I));
632 	}
633 	if (MIPS_CACHE_SYNC_WAR)
634 		__asm__ __volatile__ ("sync");
635 }
636 
637 static void r4k_flush_cache_sigtramp(unsigned long addr)
638 {
639 	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
640 }
641 
642 static void r4k_flush_icache_all(void)
643 {
644 	if (cpu_has_vtag_icache)
645 		r4k_blast_icache();
646 }
647 
648 static inline void rm7k_erratum31(void)
649 {
650 	const unsigned long ic_lsize = 32;
651 	unsigned long addr;
652 
653 	/* RM7000 erratum #31. The icache is screwed at startup. */
654 	write_c0_taglo(0);
655 	write_c0_taghi(0);
656 
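	/*
	 * For each index: store an invalid tag, fill the line, then
	 * invalidate it again.  The 0x1000 offsets in the asm repeat this
	 * across the whole 16kB I-cache.
	 */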
657 	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
658 		__asm__ __volatile__ (
659 			".set push\n\t"
660 			".set noreorder\n\t"
661 			".set mips3\n\t"
662 			"cache\t%1, 0(%0)\n\t"
663 			"cache\t%1, 0x1000(%0)\n\t"
664 			"cache\t%1, 0x2000(%0)\n\t"
665 			"cache\t%1, 0x3000(%0)\n\t"
666 			"cache\t%2, 0(%0)\n\t"
667 			"cache\t%2, 0x1000(%0)\n\t"
668 			"cache\t%2, 0x2000(%0)\n\t"
669 			"cache\t%2, 0x3000(%0)\n\t"
670 			"cache\t%1, 0(%0)\n\t"
671 			"cache\t%1, 0x1000(%0)\n\t"
672 			"cache\t%1, 0x2000(%0)\n\t"
673 			"cache\t%1, 0x3000(%0)\n\t"
674 			".set pop\n"
675 			:
676 			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
677 	}
678 }
679 
680 static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
681 	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
682 };
683 
684 static void __init probe_pcache(void)
685 {
686 	struct cpuinfo_mips *c = &current_cpu_data;
687 	unsigned int config = read_c0_config();
688 	unsigned int prid = read_c0_prid();
689 	unsigned long config1;
690 	unsigned int lsize;
691 
692 	switch (c->cputype) {
693 	case CPU_R4600:			/* QED style two way caches? */
694 	case CPU_R4700:
695 	case CPU_R5000:
696 	case CPU_NEVADA:
697 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
698 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
699 		c->icache.ways = 2;
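		/* waybit = log2(way size): the address bit used to step
		   between ways in indexed cache ops */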
700 		c->icache.waybit = __ffs(icache_size/2);
701 
702 		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
703 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
704 		c->dcache.ways = 2;
705 		c->dcache.waybit= __ffs(dcache_size/2);
706 
707 		c->options |= MIPS_CPU_CACHE_CDEX_P;
708 		break;
709 
710 	case CPU_R5432:
711 	case CPU_R5500:
712 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
713 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
714 		c->icache.ways = 2;
715 		c->icache.waybit= 0;
716 
717 		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
718 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
719 		c->dcache.ways = 2;
720 		c->dcache.waybit = 0;
721 
722 		c->options |= MIPS_CPU_CACHE_CDEX_P;
723 		break;
724 
725 	case CPU_TX49XX:
726 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
727 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
728 		c->icache.ways = 4;
729 		c->icache.waybit= 0;
730 
731 		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
732 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
733 		c->dcache.ways = 4;
734 		c->dcache.waybit = 0;
735 
736 		c->options |= MIPS_CPU_CACHE_CDEX_P;
737 		c->options |= MIPS_CPU_PREFETCH;
738 		break;
739 
740 	case CPU_R4000PC:
741 	case CPU_R4000SC:
742 	case CPU_R4000MC:
743 	case CPU_R4400PC:
744 	case CPU_R4400SC:
745 	case CPU_R4400MC:
746 	case CPU_R4300:
747 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
748 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
749 		c->icache.ways = 1;
750 		c->icache.waybit = 0; 	/* doesn't matter */
751 
752 		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
753 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
754 		c->dcache.ways = 1;
755 		c->dcache.waybit = 0;	/* does not matter */
756 
757 		c->options |= MIPS_CPU_CACHE_CDEX_P;
758 		break;
759 
760 	case CPU_R10000:
761 	case CPU_R12000:
762 	case CPU_R14000:
763 		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
764 		c->icache.linesz = 64;
765 		c->icache.ways = 2;
766 		c->icache.waybit = 0;
767 
768 		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
769 		c->dcache.linesz = 32;
770 		c->dcache.ways = 2;
771 		c->dcache.waybit = 0;
772 
773 		c->options |= MIPS_CPU_PREFETCH;
774 		break;
775 
776 	case CPU_VR4133:
777 		write_c0_config(config & ~VR41_CONF_P4K);
778 	case CPU_VR4131:
779 		/* Workaround for cache instruction bug of VR4131 */
780 		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
781 		    c->processor_id == 0x0c82U) {
782 			config |= 0x00400000U;
783 			if (c->processor_id == 0x0c80U)
784 				config |= VR41_CONF_BP;
785 			write_c0_config(config);
786 		} else
787 			c->options |= MIPS_CPU_CACHE_CDEX_P;
788 
789 		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
790 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
791 		c->icache.ways = 2;
792 		c->icache.waybit = __ffs(icache_size/2);
793 
794 		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
795 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
796 		c->dcache.ways = 2;
797 		c->dcache.waybit = __ffs(dcache_size/2);
798 		break;
799 
800 	case CPU_VR41XX:
801 	case CPU_VR4111:
802 	case CPU_VR4121:
803 	case CPU_VR4122:
804 	case CPU_VR4181:
805 	case CPU_VR4181A:
806 		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
807 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
808 		c->icache.ways = 1;
809 		c->icache.waybit = 0; 	/* doesn't matter */
810 
811 		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
812 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
813 		c->dcache.ways = 1;
814 		c->dcache.waybit = 0;	/* does not matter */
815 
816 		c->options |= MIPS_CPU_CACHE_CDEX_P;
817 		break;
818 
819 	case CPU_RM7000:
820 		rm7k_erratum31();
821 
822 	case CPU_RM9000:
823 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
824 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
825 		c->icache.ways = 4;
826 		c->icache.waybit = __ffs(icache_size / c->icache.ways);
827 
828 		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
829 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
830 		c->dcache.ways = 4;
831 		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
832 
833 #if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
834 		c->options |= MIPS_CPU_CACHE_CDEX_P;
835 #endif
836 		c->options |= MIPS_CPU_PREFETCH;
837 		break;
838 
839 	case CPU_LOONGSON2:
840 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
841 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
842 		if (prid & 0x3)
843 			c->icache.ways = 4;
844 		else
845 			c->icache.ways = 2;
846 		c->icache.waybit = 0;
847 
848 		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
849 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
850 		if (prid & 0x3)
851 			c->dcache.ways = 4;
852 		else
853 			c->dcache.ways = 2;
854 		c->dcache.waybit = 0;
855 		break;
856 
857 	default:
858 		if (!(config & MIPS_CONF_M))
859 			panic("Don't know how to probe P-caches on this cpu.");
860 
861 		/*
862 		 * So we seem to be a MIPS32 or MIPS64 CPU.
863 		 * Let's probe the I-cache ...
864 		 */
865 		config1 = read_c0_config1();
866 
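		/*
		 * Config1 I-cache fields: IL (bits 21:19) encodes a line size
		 * of 2 << IL (0 means no I-cache), IS (24:22) gives 64 << IS
		 * sets per way and IA (18:16) gives IA + 1 ways.  The D-cache
		 * uses DL (12:10), DS (15:13) and DA (9:7) in the same way.
		 */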
867 		if ((lsize = ((config1 >> 19) & 7)))
868 			c->icache.linesz = 2 << lsize;
869 		else
870 			c->icache.linesz = lsize;
871 		c->icache.sets = 64 << ((config1 >> 22) & 7);
872 		c->icache.ways = 1 + ((config1 >> 16) & 7);
873 
874 		icache_size = c->icache.sets *
875 		              c->icache.ways *
876 		              c->icache.linesz;
877 		c->icache.waybit = __ffs(icache_size/c->icache.ways);
878 
879 		if (config & 0x8)		/* VI bit */
880 			c->icache.flags |= MIPS_CACHE_VTAG;
881 
882 		/*
883 		 * Now probe the MIPS32 / MIPS64 data cache.
884 		 */
885 		c->dcache.flags = 0;
886 
887 		if ((lsize = ((config1 >> 10) & 7)))
888 			c->dcache.linesz = 2 << lsize;
889 		else
890 			c->dcache.linesz= lsize;
891 		c->dcache.sets = 64 << ((config1 >> 13) & 7);
892 		c->dcache.ways = 1 + ((config1 >> 7) & 7);
893 
894 		dcache_size = c->dcache.sets *
895 		              c->dcache.ways *
896 		              c->dcache.linesz;
897 		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
898 
899 		c->options |= MIPS_CPU_PREFETCH;
900 		break;
901 	}
902 
903 	/*
904 	 * Processor configuration sanity check for the R4000SC erratum
905 	 * #5.  With page sizes larger than 32kB there is no possibility
906 	 * to get a VCE exception anymore so we don't care about this
907 	 * of getting a VCE exception anymore so we don't care about this
908 	 * misconfiguration.  The case is rather theoretical anyway;
909 	 * presumably no vendor is shipping its hardware in the "bad"
910 	 */
911 	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
912 	    !(config & CONF_SC) && c->icache.linesz != 16 &&
913 	    PAGE_SIZE <= 0x8000)
914 		panic("Improper R4000SC processor configuration detected");
915 
916 	/* compute a couple of other cache variables */
917 	c->icache.waysize = icache_size / c->icache.ways;
918 	c->dcache.waysize = dcache_size / c->dcache.ways;
919 
920 	c->icache.sets = c->icache.linesz ?
921 		icache_size / (c->icache.linesz * c->icache.ways) : 0;
922 	c->dcache.sets = c->dcache.linesz ?
923 		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
924 
925 	/*
926 	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
927 	 * 2-way virtually indexed so normally they'd suffer from aliases, but
928 	 * magic in the hardware deals with that for us so we don't need to
929 	 * take care ourselves.
930 	 */
931 	switch (c->cputype) {
932 	case CPU_20KC:
933 	case CPU_25KF:
934 	case CPU_SB1:
935 	case CPU_SB1A:
936 		c->dcache.flags |= MIPS_CACHE_PINDEX;
937 		break;
938 
939 	case CPU_R10000:
940 	case CPU_R12000:
941 	case CPU_R14000:
942 		break;
943 
944 	case CPU_24K:
945 	case CPU_34K:
946 	case CPU_74K:
947 		if ((read_c0_config7() & (1 << 16))) {
948 			/* effectively physically indexed dcache,
949 			   thus no virtual aliases. */
950 			c->dcache.flags |= MIPS_CACHE_PINDEX;
951 			break;
952 		}
953 	default:
954 		if (c->dcache.waysize > PAGE_SIZE)
955 			c->dcache.flags |= MIPS_CACHE_ALIASES;
956 	}
957 
958 	switch (c->cputype) {
959 	case CPU_20KC:
960 		/*
961 		 * Some older 20Kc chips don't have the 'VI' bit in
962 		 * the config register.
963 		 */
964 		c->icache.flags |= MIPS_CACHE_VTAG;
965 		break;
966 
967 	case CPU_AU1000:
968 	case CPU_AU1500:
969 	case CPU_AU1100:
970 	case CPU_AU1550:
971 	case CPU_AU1200:
972 		c->icache.flags |= MIPS_CACHE_IC_F_DC;
973 		break;
974 	}
975 
976 #ifdef  CONFIG_CPU_LOONGSON2
977 	/*
978 	 * LOONGSON2 has a 4-way icache, but when using the indexed cache op,
979 	 * one op will act on all 4 ways.
980 	 */
981 	c->icache.ways = 1;
982 #endif
983 
984 	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
985 	       icache_size >> 10,
986 	       cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
987 	       way_string[c->icache.ways], c->icache.linesz);
988 
989 	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
990 	       dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
991 }
992 
993 /*
994  * If you even _breathe_ on this function, look at the gcc output and make sure
995  * it does not pop things on and off the stack for the cache sizing loop that
996  * executes in KSEG1 space or else you will crash and burn badly.  You have
997  * been warned.
998  */
999 static int __init probe_scache(void)
1000 {
1001 	unsigned long flags, addr, begin, end, pow2;
1002 	unsigned int config = read_c0_config();
1003 	struct cpuinfo_mips *c = &current_cpu_data;
1004 	int tmp;
1005 
1006 	if (config & CONF_SC)
1007 		return 0;
1008 
1009 	begin = (unsigned long) &_stext;
1010 	begin &= ~((4 * 1024 * 1024) - 1);
1011 	end = begin + (4 * 1024 * 1024);
1012 
1013 	/*
1014 	 * This is such a bitch, you'd think they would make it easy to do
1015 	 * this.  Away you daemons of stupidity!
1016 	 */
1017 	local_irq_save(flags);
1018 
1019 	/* Fill each size-multiple cache line with a valid tag. */
1020 	pow2 = (64 * 1024);
1021 	for (addr = begin; addr < end; addr = (begin + pow2)) {
1022 		unsigned long *p = (unsigned long *) addr;
1023 		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
1024 		pow2 <<= 1;
1025 	}
1026 
1027 	/* Load first line with zero (therefore invalid) tag. */
1028 	write_c0_taglo(0);
1029 	write_c0_taghi(0);
1030 	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
1031 	cache_op(Index_Store_Tag_I, begin);
1032 	cache_op(Index_Store_Tag_D, begin);
1033 	cache_op(Index_Store_Tag_SD, begin);
1034 
1035 	/* Now search for the wrap around point. */
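	/*
	 * The first power-of-two offset whose tag reads back as zero indexes
	 * the same line as `begin', so that offset is the size of the
	 * direct-mapped S-cache.
	 */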
1036 	pow2 = (128 * 1024);
1037 	tmp = 0;
1038 	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
1039 		cache_op(Index_Load_Tag_SD, addr);
1040 		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
1041 		if (!read_c0_taglo())
1042 			break;
1043 		pow2 <<= 1;
1044 	}
1045 	local_irq_restore(flags);
1046 	addr -= begin;
1047 
1048 	scache_size = addr;
1049 	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
1050 	c->scache.ways = 1;
1051 	c->scache.waybit = 0;		/* does not matter */
1052 
1053 	return 1;
1054 }
1055 
1056 #if defined(CONFIG_CPU_LOONGSON2)
1057 static void __init loongson2_sc_init(void)
1058 {
1059 	struct cpuinfo_mips *c = &current_cpu_data;
1060 
1061 	scache_size = 512*1024;
1062 	c->scache.linesz = 32;
1063 	c->scache.ways = 4;
1064 	c->scache.waybit = 0;
1065 	c->scache.waysize = scache_size / (c->scache.ways);
1066 	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1067 	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1068 	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1069 
1070 	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1071 }
1072 #endif
1073 
1074 extern int r5k_sc_init(void);
1075 extern int rm7k_sc_init(void);
1076 extern int mips_sc_init(void);
1077 
1078 static void __init setup_scache(void)
1079 {
1080 	struct cpuinfo_mips *c = &current_cpu_data;
1081 	unsigned int config = read_c0_config();
1082 	int sc_present = 0;
1083 
1084 	/*
1085 	 * Do the probing thing on R4000SC and R4400SC processors.  Other
1086 	 * processors don't have an S-cache that would be relevant to the
1087 	 * Linux memory management.
1088 	 */
1089 	switch (c->cputype) {
1090 	case CPU_R4000SC:
1091 	case CPU_R4000MC:
1092 	case CPU_R4400SC:
1093 	case CPU_R4400MC:
1094 		sc_present = run_uncached(probe_scache);
1095 		if (sc_present)
1096 			c->options |= MIPS_CPU_CACHE_CDEX_S;
1097 		break;
1098 
1099 	case CPU_R10000:
1100 	case CPU_R12000:
1101 	case CPU_R14000:
1102 		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
1103 		c->scache.linesz = 64 << ((config >> 13) & 1);
1104 		c->scache.ways = 2;
1105 		c->scache.waybit= 0;
1106 		sc_present = 1;
1107 		break;
1108 
1109 	case CPU_R5000:
1110 	case CPU_NEVADA:
1111 #ifdef CONFIG_R5000_CPU_SCACHE
1112 		r5k_sc_init();
1113 #endif
1114                 return;
1115 
1116 	case CPU_RM7000:
1117 	case CPU_RM9000:
1118 #ifdef CONFIG_RM7000_CPU_SCACHE
1119 		rm7k_sc_init();
1120 #endif
1121 		return;
1122 
1123 #if defined(CONFIG_CPU_LOONGSON2)
1124 	case CPU_LOONGSON2:
1125 		loongson2_sc_init();
1126 		return;
1127 #endif
1128 
1129 	default:
1130 		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
1131 		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
1132 		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
1133 		    c->isa_level == MIPS_CPU_ISA_M64R2) {
1134 #ifdef CONFIG_MIPS_CPU_SCACHE
1135 			if (mips_sc_init ()) {
1136 				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
1137 				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
1138 				       scache_size >> 10,
1139 				       way_string[c->scache.ways], c->scache.linesz);
1140 			}
1141 #else
1142 			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
1143 				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
1144 #endif
1145 			return;
1146 		}
1147 		sc_present = 0;
1148 	}
1149 
1150 	if (!sc_present)
1151 		return;
1152 
1153 	/* compute a couple of other cache variables */
1154 	c->scache.waysize = scache_size / c->scache.ways;
1155 
1156 	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1157 
1158 	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1159 	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1160 
1161 	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1162 }
1163 
1164 void au1x00_fixup_config_od(void)
1165 {
1166 	/*
1167 	 * c0_config.od (bit 19) was write-only (and read as 0)
1168 	 * on the early revisions of Alchemy SOCs.  It disables the bus
1169 	 * transaction overlapping and needs to be set to fix various errata.
1170 	 */
1171 	switch (read_c0_prid()) {
1172 	case 0x00030100: /* Au1000 DA */
1173 	case 0x00030201: /* Au1000 HA */
1174 	case 0x00030202: /* Au1000 HB */
1175 	case 0x01030200: /* Au1500 AB */
1176 	/*
1177 	 * The Au1100 errata are actually silent about this bit, so we set it
1178 	 * just in case for those revisions that require it to be set according
1179 	 * to arch/mips/au1000/common/cputable.c
1180 	 */
1181 	case 0x02030200: /* Au1100 AB */
1182 	case 0x02030201: /* Au1100 BA */
1183 	case 0x02030202: /* Au1100 BC */
1184 		set_c0_config(1 << 19);
1185 		break;
1186 	}
1187 }
1188 
1189 static void __init coherency_setup(void)
1190 {
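	/* Switch kseg0 to the platform's default cacheability (the K0 field
	   of c0_config). */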
1191 	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
1192 
1193 	/*
1194 	 * c0_status.cu=0 specifies that updates by the sc instruction use
1195 	 * the coherency mode specified by the TLB; 1 means cacheable
1196 	 * coherent update on write will be used.  Not all processors have
1197 	 * this bit; some wire it to zero, others like Toshiba had the
1198 	 * silly idea of putting something else there ...
1199 	 */
1200 	switch (current_cpu_type()) {
1201 	case CPU_R4000PC:
1202 	case CPU_R4000SC:
1203 	case CPU_R4000MC:
1204 	case CPU_R4400PC:
1205 	case CPU_R4400SC:
1206 	case CPU_R4400MC:
1207 		clear_c0_config(CONF_CU);
1208 		break;
1209 	/*
1210 	 * We need to catch the early Alchemy SOCs with
1211 	 * the write-only c0_config.od bit and set it back to one...
1212 	 */
1213 	case CPU_AU1000: /* rev. DA, HA, HB */
1214 	case CPU_AU1100: /* rev. AB, BA, BC ?? */
1215 	case CPU_AU1500: /* rev. AB */
1216 		au1x00_fixup_config_od();
1217 		break;
1218 	}
1219 }
1220 
1221 void __init r4k_cache_init(void)
1222 {
1223 	extern void build_clear_page(void);
1224 	extern void build_copy_page(void);
1225 	extern char __weak except_vec2_generic;
1226 	extern char __weak except_vec2_sb1;
1227 	struct cpuinfo_mips *c = &current_cpu_data;
1228 
1229 	switch (c->cputype) {
1230 	case CPU_SB1:
1231 	case CPU_SB1A:
1232 		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
1233 		break;
1234 
1235 	default:
1236 		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
1237 		break;
1238 	}
1239 
1240 	probe_pcache();
1241 	setup_scache();
1242 
1243 	r4k_blast_dcache_page_setup();
1244 	r4k_blast_dcache_page_indexed_setup();
1245 	r4k_blast_dcache_setup();
1246 	r4k_blast_icache_page_setup();
1247 	r4k_blast_icache_page_indexed_setup();
1248 	r4k_blast_icache_setup();
1249 	r4k_blast_scache_page_setup();
1250 	r4k_blast_scache_page_indexed_setup();
1251 	r4k_blast_scache_setup();
1252 
1253 	/*
1254 	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
1255 	 * This code supports virtually indexed processors and will be
1256 	 * unnecessarily inefficient on physically indexed processors.
1257 	 */
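	/* The D-cache way size (sets * linesz) determines the alias colours;
	   aligning shared mappings to it keeps every mapping of a page at
	   the same colour. */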
1258 	if (c->dcache.linesz)
1259 		shm_align_mask = max_t( unsigned long,
1260 					c->dcache.sets * c->dcache.linesz - 1,
1261 					PAGE_SIZE - 1);
1262 	else
1263 		shm_align_mask = PAGE_SIZE-1;
1264 	flush_cache_all		= cache_noop;
1265 	__flush_cache_all	= r4k___flush_cache_all;
1266 	flush_cache_mm		= r4k_flush_cache_mm;
1267 	flush_cache_page	= r4k_flush_cache_page;
1268 	flush_cache_range	= r4k_flush_cache_range;
1269 
1270 	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
1271 	flush_icache_all	= r4k_flush_icache_all;
1272 	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
1273 	flush_data_cache_page	= r4k_flush_data_cache_page;
1274 	flush_icache_range	= r4k_flush_icache_range;
1275 
1276 #ifdef CONFIG_DMA_NONCOHERENT
1277 	_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
1278 	_dma_cache_wback	= r4k_dma_cache_wback_inv;
1279 	_dma_cache_inv		= r4k_dma_cache_inv;
1280 #endif
1281 
1282 	build_clear_page();
1283 	build_copy_page();
1284 	local_r4k___flush_cache_all(NULL);
1285 	coherency_setup();
1286 }
1287