xref: /linux/arch/mips/mm/c-r4k.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */


/*
 * A special variant of smp_call_function() for use by cache functions:
 *
 *  o No return value
 *  o collapses to a normal function call on UP kernels
 *  o collapses to a normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
                                   int retry, int wait)
{
	preempt_disable();

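	/*
	 * MIPS MT SMP and SMTC kernels run their virtual processors on
	 * shared primary caches, so the cross-CPU call is compiled out
	 * and the local call below does all the work.
	 */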
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, retry, wait);
#endif
	func(info);
	preempt_enable();
}

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

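/*
 * Workaround for Hit-type cache op errata on early R4600 silicon: V2.x
 * parts get an uncached load (the read from CKSEG1) issued before the
 * cache op, V1.x parts are padded with nops.  The R4600_V*_HIT_CACHEOP_WAR
 * predicates come from <asm/war.h>.
 */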
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

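/*
 * The r4k_blast_* entry points below are function pointers that the
 * *_setup() routines bind once at boot to the variant matching the
 * detected cache line size (and any needed errata workaround), so the
 * hot paths pay no per-call size checks.
 */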
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static void __init r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __init r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (*r4k_blast_dcache)(void);

static void __init r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

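/*
 * TX49 workaround: index-invalidating the I-cache line that holds the
 * currently executing code is unsafe, so the flush is split into two
 * passes.  cache32_unroll32() covers one 1kB chunk (32 lines of 32 bytes),
 * and the CACHE32_UNROLL32_ALIGN* macros align each loop so that it runs
 * in an even (respectively odd) 1kB chunk while it blasts the chunks of
 * the opposite parity.
 */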
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in an even chunk, blast the odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in an odd chunk, blast the even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in an even chunk, blast the odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in an odd chunk, blast the even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (*r4k_blast_icache_page)(unsigned long addr);

static void __init r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}


static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static void __init r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (*r4k_blast_icache)(void);

static void __init r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (*r4k_blast_scache_page)(unsigned long addr);

static void __init r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static void __init r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (*r4k_blast_scache)(void);

static void __init r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

/*
 * This is the mm layer's former flush_cache_all(), which really should
 * be flush_cache_vunmap() these days ...
 */
static inline void local_r4k_flush_cache_all(void *args)
{
	r4k_blast_dcache();
}

static void r4k_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k___flush_cache_all(void *args)
{
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_data.cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k_flush_cache_range(void *args)
{
	struct vm_area_struct *vma = args;

	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	r4k_blast_dcache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void *args)
{
	struct mm_struct *mm = args;

	if (!cpu_context(smp_processor_id(), mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, while R10000 and R12000 behave
	 * sanely.  R4000SC and R4400SC indexed S-cache ops also invalidate
	 * the primary caches, so we can bail out early.
	 */
	if (current_cpu_data.cputype == CPU_R4000SC ||
	    current_cpu_data.cputype == CPU_R4000MC ||
	    current_cpu_data.cputype == CPU_R4400SC ||
	    current_cpu_data.cputype == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

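/*
 * r4k_on_each_cpu() hands the local handler a single void * argument, so
 * the (vma, addr, pfn) triple is marshalled through this struct.
 */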
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	unsigned long paddr = fcp_args->pfn << PAGE_SHIFT;
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for an ASID other than the current one is too
	 * difficult since the stupid R4k caches do a TLB translation for
	 * every cache flush operation.  So we do indexed flushes in that
	 * case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
			r4k_blast_dcache_page(addr);
			if (exec && !cpu_icache_snoops_remote_store)
				r4k_blast_scache_page(addr);
		}
		if (exec)
			r4k_blast_icache_page(addr);

		return;
	}

	/*
	 * Do an indexed flush; it is too much work to get the (possible)
	 * TLB refills to work correctly.
	 */
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
					      paddr : addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page_indexed(paddr);
	}
	if (exec) {
		if (cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page_indexed(addr);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void *addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

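/*
 * Once a range is at least as large as the cache itself, blasting the
 * whole cache by index is cheaper than walking the range line by line,
 * so every step below first compares the range length against the
 * corresponding cache size.
 */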
static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}

		if (!cpu_icache_snoops_remote_store && scache_size) {
			if (end - start > scache_size)
				r4k_blast_scache();
			else
				protected_blast_scache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}

#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses, we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void *arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

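/*
 * RM7000 erratum #31: the 16kB, 4-way primary I-cache is in an
 * inconsistent state after reset.  For each index the loop zeroes the
 * tags in all four ways (which sit 0x1000 bytes apart), refills them
 * with Fill cache ops and then zeroes the tags once more.
 */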
static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

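	/*
	 * For the classic cores below, cache geometry is decoded from the
	 * IC/DC (size) and IB/DB (line size) fields of c0_config; e.g. an
	 * IC field of 2 gives icache_size = 1 << (12 + 2) = 16kB.  waybit
	 * is the lowest address bit selecting the way, so a 16kB 2-way
	 * cache has 8kB ways and waybit = __ffs(8192) = 13.
	 */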
	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
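		/* fall through */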
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

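		/* fall through */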
	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * We seem to be a MIPS32 or MIPS64 CPU, so let's probe
		 * the I-cache ...
		 */
		config1 = read_c0_config1();

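		/*
		 * MIPS32/MIPS64 geometry is encoded in c0_config1:
		 * line size = 2 << IL, sets = 64 << IS, ways = 1 + IA.
		 * e.g. IL=4, IS=2, IA=3 describes a 4-way cache of
		 * 256 sets with 32-byte lines, i.e. 32kB in total.
		 */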
		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
		              c->icache.ways *
		              c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
		              c->dcache.ways *
		              c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * of getting a VCE exception anymore, so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping its hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed caches, so they'd normally suffer
	 * from aliases, but magic in the hardware deals with that for us
	 * so we don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_SB1:
		break;
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __init probe_scache(void)
{
	extern unsigned long stext;
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

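	/*
	 * The loads above left valid tags in the lines at begin + 64kB,
	 * 128kB, 256kB, ... and the line at begin now carries a zero tag.
	 * The first power-of-two offset whose index wraps around onto the
	 * line at begin reads back that zero tag, and that offset is the
	 * S-cache size.
	 */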
	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;		/* does not matter */

	return 1;
}

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have an S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write-only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables bus transaction
	 * overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set
	 * it just in case for those revisions that require it to be set
	 * according to arch/mips/au1000/common/cputable.c
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

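/*
 * Set the kseg0 coherency attribute in c0_config to the kernel's default
 * cacheability mode, then apply the per-CPU fixups below.
 */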
static void __init coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_config.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;
	}
}

void __init r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char except_vec2_generic;
	struct cpuinfo_mips *c = &current_cpu_data;

	/* Default cache error handler for R4000 and R5000 family */
	set_uncached_handler(0x100, &except_vec2_generic, 0x80);

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;
	flush_cache_all		= r4k_flush_cache_all;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
	_dma_cache_wback	= r4k_dma_cache_wback_inv;
	_dma_cache_inv		= r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();
}