/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
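
/*
 * For example, with 4 KiB pages 64 pages is 256 KiB: flush_cache_range()
 * falls back to flush_dcache_all() for ranges at or above this, as does
 * flush_cache_mm() when an mm has at least this many PTEs.
 */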

static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
}
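
/*
 * Worked example (a hypothetical 16 KiB direct-mapped D-cache with
 * 32-byte lines, i.e. 512 sets and entry_shift = 5, and 4 KiB pages):
 *
 *	alias_mask = ((512 - 1) << 5) & ~(4096 - 1) = 0x3000
 *	n_aliases  = (0x3000 >> 12) + 1             = 4
 *
 * i.e. four distinct page colours can index the same cache sets.
 */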

static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		cpu_data->icache.ways,
		cpu_data->icache.sets,
		cpu_data->icache.way_incr);
	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		cpu_data->icache.entry_mask,
		cpu_data->icache.alias_mask,
		cpu_data->icache.n_aliases);
	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		cpu_data->dcache.ways,
		cpu_data->dcache.sets,
		cpu_data->dcache.way_incr);
	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		cpu_data->dcache.entry_mask,
		cpu_data->dcache.alias_mask,
		cpu_data->dcache.n_aliases);

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}

/*
 * SH-4 has a virtually indexed, physically tagged cache.
 */

/* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
#define MAX_P3_MUTEXES 16

struct mutex p3map_mutex[MAX_P3_MUTEXES];

void __init p3_cache_init(void)
{
	int i;

	compute_alias(&cpu_data->icache);
	compute_alias(&cpu_data->dcache);

	switch (cpu_data->dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();

	if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
		panic("%s failed.", __FUNCTION__);

	for (i = 0; i < cpu_data->dcache.n_aliases; i++)
		mutex_init(&p3map_mutex[i]);
}
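
/*
 * Note: the four-page P3 window mapped above is presumably what the SH-4
 * user-page copy/clear code (pg-sh4.c) uses to create temporary kernel
 * mappings of the right cache colour; each colour's mapping is serialised
 * by the corresponding p3map_mutex[] entry initialised here.
 */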

/*
 * Write back the dirty D-caches, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbwb	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
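
/*
 * Example of the rounding arithmetic above (assuming 32-byte cache
 * lines): start = 0x1004, size = 8 gives begin = 0x1000 and
 * end = 0x1020, so the single line containing the range is written
 * back.  The same rounding applies to the purge and invalidate
 * variants below.
 */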

/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbp	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Invalidate the D-cache lines without writing them back.
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbi	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and from the a.out loader.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}

/*
 * Write back the D-cache and purge the I-cache for the signal
 * trampoline, which happens to need the same behaviour as
 * flush_icache_range().  So we simply flush out a line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
	int i;

	v = addr & ~(L1_CACHE_BYTES-1);
	asm volatile("ocbwb	%0"
		     : /* no output */
		     : "m" (__m(v)));

	index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask);

	local_irq_save(flags);
	jump_to_P2();

	for (i = 0; i < cpu_data->icache.ways;
	     i++, index += cpu_data->icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */

	back_to_P1();
	wmb();
	local_irq_restore(flags);
}

static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}
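
/*
 * Note: 0x20000000 is the distance from the cached P1 segment
 * (0x80000000) to its uncached P2 alias (0xA0000000); adding it to the
 * PC inside __flush_cache_4096() makes the flush loop execute from P2.
 */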

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop over every page colour in the D-cache */
		n = cpu_data->dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}
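
/*
 * With n_aliases == 4, for instance, the loop above issues associative
 * flushes at OC address array offsets 0K, 4K, 8K and 12K, so the page's
 * lines are purged regardless of which colour of virtual address they
 * were brought in under.
 */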

/* TODO: Selective icache invalidation through IC address array.. */
static inline void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_P1() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_P1();
	local_irq_restore(flags);
}

void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, cpu_data->dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}

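/*
 * Walk the page tables for [start, end) in mm, building a bitmap of
 * which D-cache colours are touched, either via a page's virtual
 * address or via the physical address it maps to, then flush just
 * those coloured segments.  Scanning stops early once every colour
 * is known to be dirty.
 */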
static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = cpu_data->dcache.alias_mask;
	unsigned long n_aliases = cpu_data->dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}

/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.  So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that.  We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (cpu_data->dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = cpu_data->dcache.alias_mask;

	/* We only need to flush the D-cache when there is an alias. */
	if ((address ^ phys) & alias_mask) {
		/* Flush the 4K at the user virtual address's colour */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Flush another 4K at the physical address's colour */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = cpu_data->icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}
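
/*
 * Worked example of the D-cache alias check above, assuming
 * alias_mask = 0x3000: address = 0x00402000 and phys = 0x0c001000 give
 * (address ^ phys) & 0x3000 = 0x3000, so the page may be cached under
 * two different colours and both coloured regions must be flushed.
 */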

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (cpu_data->dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}

/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &cpu_data->dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * the pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}

/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the
 * function bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache.  -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &cpu_data->dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}
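
/*
 * How the movca.l/ocbi pair above works: movca.l allocates the target
 * cache line for the zero page address without fetching it from memory,
 * which forces write-back of whatever dirty line previously occupied
 * that set and way; ocbi then invalidates the freshly allocated (junk)
 * line.  Setting the SR.BL bit (bit 28) around the pair keeps
 * interrupts from separating the two instructions.
 */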

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &cpu_data->dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &cpu_data->dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}
769