/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>

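/*
 * Two families of MSR helpers follow: the *_msr variants use the atomic
 * msrset/msrclr instructions, which are only available when the CPU is
 * configured with them (PVR2_USE_MSR_INSTR); the *_nomsr variants fall
 * back to a read-modify-write of rmsr through the r12 scratch register.
 */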
static inline void __enable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory");
}

static inline void __disable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory");
}

static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory", "r12");
}

/* Helper macro for computing the limits of cache range loops
 *
 * The range is first clamped to at most one cache size, then 'start'
 * is aligned down to a line boundary.  'end' may stay unaligned: the C
 * fallback loops handle that naturally, and the ASM range loop macros
 * align it themselves.  The old variant that rounded 'end' up by a
 * full line could touch one line beyond an already-aligned end.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
do {									\
	int align = ~(cache_line_length - 1);				\
	end = min(start + cache_size, end);				\
	start &= align;							\
} while (0)
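
/*
 * Worked example with hypothetical values: for a 16-byte cache line,
 * CACHE_LOOP_LIMITS(start = 0x1005, end = 0x1021, 16, 8192) leaves
 * end = 0x1021 and aligns start down to 0x1000; the loops below then
 * cover lines 0x1000, 0x1010 and 0x1020 - exactly the lines that
 * intersect the original byte range.
 */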

/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on every cacheline.  The loop starts at the last line
 * (cache_size - line_length, so that line 0 is not touched twice via
 * its cache_size alias) and the addk in the bgtid delay slot steps it
 * down to line 0.
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size - line_length;			\
	int step = -line_length;					\
	BUG_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, r0;			\
					bgtid	%0, 1b;			\
					addk	%0, %0, %1;		\
					" : : "r" (len), "r" (step)	\
					: "memory");			\
} while (0)
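
/*
 * E.g. (hypothetical geometry) CACHE_ALL_LOOP(8192, 16, wic) issues
 * wic at addresses 8176, 8160, ..., 16, 0 - once per line of an 8 KB
 * cache with 16-byte lines, since the controller indexes the cache by
 * address modulo its size.
 */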

/* The same all-cache loop with the operand order swapped, for ops such
 * as wdc.flush/wdc.clear that take their address as rA + rB.
 */
#define CACHE_ALL_LOOP2(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size - line_length;			\
	int step = -line_length;					\
	BUG_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	r0, %0;			\
					bgtid	%0, 1b;			\
					addk	%0, %0, %1;		\
					" : : "r" (len), "r" (step)	\
					: "memory");			\
} while (0)

/* for wdc.flush/clear
 *
 * 'end' is exclusive and possibly unaligned, so reduce it to the
 * address of the last line to be processed before computing the count;
 * the loop then covers [start, end] inclusive and nothing beyond.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
do {									\
	int align = ~(line_length - 1);					\
	int step = -line_length;					\
	int count;							\
	end = ((end & align) == end) ? end - line_length : end & align;\
	count = end - start;						\
	BUG_ON(count < 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, %1;			\
					bgtid	%1, 1b;			\
					addk	%1, %1, %2;		\
					" : : "r" (start), "r" (count),	\
					"r" (step) : "memory");		\
} while (0)
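
/*
 * E.g. (hypothetical values) CACHE_RANGE_LOOP_2(0x1000, 0x1021, 16,
 * wdc.flush) reduces end to 0x1020 and issues wdc.flush at 0x1020,
 * 0x1010 and 0x1000: 'start' stays fixed as the base register while
 * the shrinking count serves as the offset.
 */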

/* Only the first operand of OP carries the address - for wic, wdc
 *
 * As above, 'end' is reduced to the address of the last line to be
 * processed; the loop then walks 'start' up to and including it.
 */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
do {									\
	unsigned int volatile temp = 0;					\
	int align = ~(line_length - 1);					\
	end = ((end & align) == end) ? end - line_length : end & align;\
	BUG_ON(end < start);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%1, r0;			\
					cmpu	%0, %1, %2;		\
					bgtid	%0, 1b;			\
					addk	%1, %1, %3;		\
				" : : "r" (temp), "r" (start), "r" (end),\
					"r" (line_length) : "memory");	\
} while (0)
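
/*
 * E.g. (hypothetical values) CACHE_RANGE_LOOP_1(0x1000, 0x1021, 16,
 * wic) also reduces end to 0x1020 but walks forwards, issuing wic at
 * 0x1000, 0x1010 and 0x1020; the cmpu/bgtid pair stops the loop once
 * 'start' passes the last line.
 */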
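/*
 * When ASM_LOOP is defined the hand-written assembly loops above are
 * used; undefine it to fall back to the plain C loops in the #else
 * branches below.
 */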
#define ASM_LOOP
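/*
 * The flush/invalidate helpers come in three flavours, reflected in
 * their names:
 *   *_msr_irq   - IRQs are masked and the cache is switched off via
 *                 msrclr/msrset while the lines are processed
 *   *_nomsr_irq - the same, but the MSR is updated with the rmsr
 *                 read-modify-write sequence
 *   *_noirq     - the cache may stay enabled during the operation, so
 *                 neither IRQ masking nor an MSR toggle is needed
 */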
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif

	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
}
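/*
 * Invalidate helpers for write-through data caches: a WT cache never
 * holds dirty lines, so a plain wdc (invalidate) is always safe and no
 * writeback is needed.
 */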
static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
}

/* FIXME this is weird - it should be possible to use plain wdc here,
 * but that does not work.
 * MS: I am getting bus errors and other weird things */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
					wdc.clear);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc.clear	%0, r0;" \
					: : "r" (i));
#endif
}

static void __invalidate_dcache_range_wb(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
	/* this is a dcache loop, so step by the dcache line length */
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.clear	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
							unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
							unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif

	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
							unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif

	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
				wdc.flush);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc.flush	%0, r0;" \
					: : "r" (i));
#endif
}

static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
				: : "r" (i));
#endif
}

/* struct used for both wb and wt caches */
struct scache *mbc;
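
/*
 * Callers are not expected to use mbc directly: the cache maintenance
 * wrappers used at the end of this file (invalidate_dcache(),
 * enable_icache() and friends, presumably provided by
 * <asm/cacheflush.h>) dispatch through it to whichever scache table
 * microblaze_cache_init() selects below.
 */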

/* new wb cache model */
const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* The only difference from wb_msr is in the ie, id, de and dd functions */
const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* Old wt cache model which masks IRQs and turns the cache off */
const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};

const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};

/* New wt cache model for newer MicroBlaze versions */
const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

/* CPU version codes for 7.20.a and 7.20.d
 * - see the version table in arch/microblaze/kernel/cpu/cpuinfo.c
 */
#define CPUVER_7_20_A	0x0c
#define CPUVER_7_20_D	0x0f

#define INFO(s)	printk(KERN_INFO "cache: " s "\n")

void microblaze_cache_init(void)
{
	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
		if (cpuinfo.dcache_wb) {
			INFO("wb_msr");
			mbc = (struct scache *)&wb_msr;
			if (cpuinfo.ver_code < CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				INFO("WB won't work properly");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				INFO("wt_msr_noirq");
				mbc = (struct scache *)&wt_msr_noirq;
			} else {
				INFO("wt_msr");
				mbc = (struct scache *)&wt_msr;
			}
		}
	} else {
		if (cpuinfo.dcache_wb) {
			INFO("wb_nomsr");
			mbc = (struct scache *)&wb_nomsr;
			if (cpuinfo.ver_code < CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				INFO("WB won't work properly");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				INFO("wt_nomsr_noirq");
				mbc = (struct scache *)&wt_nomsr_noirq;
			} else {
				INFO("wt_nomsr");
				mbc = (struct scache *)&wt_nomsr;
			}
		}
	}
	invalidate_dcache();
	enable_dcache();

	invalidate_icache();
	enable_icache();
}
659