xref: /linux/arch/sh/mm/cache.c (revision 36ec807b627b4c0a0a382f0ae48eac7187d14b2b)
1c456cfc2SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2cbbe2f68SPaul Mundt /*
3f26b2a56SPaul Mundt  * arch/sh/mm/cache.c
4cbbe2f68SPaul Mundt  *
5cbbe2f68SPaul Mundt  * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
6a6198a23SPaul Mundt  * Copyright (C) 2002 - 2010  Paul Mundt
7cbbe2f68SPaul Mundt  */
8cbbe2f68SPaul Mundt #include <linux/mm.h>
9cbbe2f68SPaul Mundt #include <linux/init.h>
10cbbe2f68SPaul Mundt #include <linux/mutex.h>
11cbbe2f68SPaul Mundt #include <linux/fs.h>
12f26b2a56SPaul Mundt #include <linux/smp.h>
13cbbe2f68SPaul Mundt #include <linux/highmem.h>
14cbbe2f68SPaul Mundt #include <linux/module.h>
15cbbe2f68SPaul Mundt #include <asm/mmu_context.h>
16cbbe2f68SPaul Mundt #include <asm/cacheflush.h>
17cbbe2f68SPaul Mundt 
/*
 * Per-CPU cache maintenance hooks.  Each starts out as a no-op and is
 * pointed at the CPU-family specific implementation from cpu_cache_init()
 * below, provided the cache is actually enabled.
 */
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_folio)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_folio)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

/*
 * Virtual address range flushers (writeback / purge / invalidate),
 * likewise selected per CPU family in cpu_cache_init().
 */
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

/* Default region flusher for when the cache is disabled or unprobed. */
static inline void noop__flush_region(void *start, int size)
{
}
3837443ef3SPaul Mundt 
/*
 * Run a cache operation on the local CPU and, where cross-core flushing
 * is required, on all other online CPUs via IPI.  Preemption is disabled
 * for the duration so the "local" CPU cannot change under us.
 */
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                   int wait)
{
	preempt_disable();

	/* Needing IPI for cross-core flush is SHX3-specific. */
#ifdef CONFIG_CPU_SHX3
	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() > 1)
		smp_call_function(func, info, wait);
#endif

	/* smp_call_function() skips the caller; flush locally ourselves. */
	func(info);

	preempt_enable();
}
596f379578SPaul Mundt 
/*
 * Copy data into a user page (e.g. for ptrace writes) while keeping the
 * aliasing D-cache coherent.  If the page is mapped and known clean we
 * write through a kernel mapping that aliases the user colour; otherwise
 * we write through the plain kernel address and mark the folio dirty in
 * the cache sense by clearing PG_dcache_clean.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	struct folio *folio = page_folio(page);

	if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
	    test_bit(PG_dcache_clean, &folio->flags)) {
		/* Write via an alias-matching coherent kernel mapping. */
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &folio->flags);
	}

	/* Executable mappings additionally need the I-cache refreshed. */
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
80cbbe2f68SPaul Mundt 
/*
 * Copy data out of a user page (e.g. for ptrace reads) with the same
 * alias-aware strategy as copy_to_user_page(): read through a coherent
 * kernel mapping when the folio is mapped and cache-clean, otherwise
 * read the kernel address directly and drop the clean bit.
 */
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	struct folio *folio = page_folio(page);

	if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
	    test_bit(PG_dcache_clean, &folio->flags)) {
		/* Read via an alias-matching coherent kernel mapping. */
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &folio->flags);
	}
}
98cbbe2f68SPaul Mundt 
/*
 * Copy a user page for COW.  The source is read through a coherent
 * mapping when its cached contents may alias; the destination is then
 * purged from the cache if its kernel address aliases the user address,
 * or unconditionally for executable mappings so stale I-cache lines
 * cannot be fetched.
 */
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	struct folio *src = page_folio(from);
	void *vfrom, *vto;

	vto = kmap_atomic(to);

	if (boot_cpu_data.dcache.n_aliases && folio_mapped(src) &&
	    test_bit(PG_dcache_clean, &src->flags)) {
		/* Source may have dirty aliased lines; read coherently. */
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
	    (vma->vm_flags & VM_EXEC))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
127cbbe2f68SPaul Mundt 
/*
 * Zero a page destined for userspace.  If the kernel-side address
 * aliases the user address in the D-cache, purge the freshly written
 * lines so the user mapping sees the zeroes.
 */
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL(clear_user_highpage);
140cbbe2f68SPaul Mundt 
/*
 * Called when a PTE is installed.  On aliasing D-caches, if the folio
 * was cache-dirty (PG_dcache_clean unset), purge its kernel-side lines
 * so the new user mapping cannot observe stale aliased data; the bit is
 * set again atomically in the same test_and_set operation.
 */
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	/* Nothing to do on non-aliasing caches. */
	if (!boot_cpu_data.dcache.n_aliases)
		return;

	if (pfn_valid(pfn)) {
		struct folio *folio = page_folio(pfn_to_page(pfn));
		int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags);
		if (dirty)
			__flush_purge_region(folio_address(folio),
						folio_size(folio));
	}
}
157cbbe2f68SPaul Mundt 
/*
 * Flush an anonymous page whose kernel address aliases the given user
 * virtual address.  When the folio is mapped and cache-clean, flushing
 * is done via a coherent mapping (kunmap_coherent() currently performs
 * the purge); otherwise the whole folio is purged directly.
 */
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	struct folio *folio = page_folio(page);
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
		    test_bit(PG_dcache_clean, &folio->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region(folio_address(folio),
						folio_size(folio));
	}
}
177ecba1060SPaul Mundt 
/* Flush the entire cache hierarchy on every online CPU. */
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);
183f26b2a56SPaul Mundt 
184f26b2a56SPaul Mundt void flush_cache_mm(struct mm_struct *mm)
185f26b2a56SPaul Mundt {
186654d364eSPaul Mundt 	if (boot_cpu_data.dcache.n_aliases == 0)
187654d364eSPaul Mundt 		return;
188654d364eSPaul Mundt 
1896f379578SPaul Mundt 	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
190f26b2a56SPaul Mundt }
191f26b2a56SPaul Mundt 
192f26b2a56SPaul Mundt void flush_cache_dup_mm(struct mm_struct *mm)
193f26b2a56SPaul Mundt {
194654d364eSPaul Mundt 	if (boot_cpu_data.dcache.n_aliases == 0)
195654d364eSPaul Mundt 		return;
196654d364eSPaul Mundt 
1976f379578SPaul Mundt 	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
198f26b2a56SPaul Mundt }
199f26b2a56SPaul Mundt 
200f26b2a56SPaul Mundt void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
201f26b2a56SPaul Mundt 		      unsigned long pfn)
202f26b2a56SPaul Mundt {
203f26b2a56SPaul Mundt 	struct flusher_data data;
204f26b2a56SPaul Mundt 
205f26b2a56SPaul Mundt 	data.vma = vma;
206f26b2a56SPaul Mundt 	data.addr1 = addr;
207f26b2a56SPaul Mundt 	data.addr2 = pfn;
208f26b2a56SPaul Mundt 
2096f379578SPaul Mundt 	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
210f26b2a56SPaul Mundt }
211f26b2a56SPaul Mundt 
212f26b2a56SPaul Mundt void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
213f26b2a56SPaul Mundt 		       unsigned long end)
214f26b2a56SPaul Mundt {
215f26b2a56SPaul Mundt 	struct flusher_data data;
216f26b2a56SPaul Mundt 
217f26b2a56SPaul Mundt 	data.vma = vma;
218f26b2a56SPaul Mundt 	data.addr1 = start;
219f26b2a56SPaul Mundt 	data.addr2 = end;
220f26b2a56SPaul Mundt 
2216f379578SPaul Mundt 	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
222f26b2a56SPaul Mundt }
2230a993b0aSPaul Mundt EXPORT_SYMBOL(flush_cache_range);
224f26b2a56SPaul Mundt 
/* Write back/invalidate a folio's D-cache lines on every online CPU. */
void flush_dcache_folio(struct folio *folio)
{
	cacheop_on_each_cpu(local_flush_dcache_folio, folio, 1);
}
EXPORT_SYMBOL(flush_dcache_folio);
230f26b2a56SPaul Mundt 
231f26b2a56SPaul Mundt void flush_icache_range(unsigned long start, unsigned long end)
232f26b2a56SPaul Mundt {
233f26b2a56SPaul Mundt 	struct flusher_data data;
234f26b2a56SPaul Mundt 
235f26b2a56SPaul Mundt 	data.vma = NULL;
236f26b2a56SPaul Mundt 	data.addr1 = start;
237f26b2a56SPaul Mundt 	data.addr2 = end;
238f26b2a56SPaul Mundt 
2396f379578SPaul Mundt 	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
240f26b2a56SPaul Mundt }
241e3560305SPranith Kumar EXPORT_SYMBOL(flush_icache_range);
242f26b2a56SPaul Mundt 
/*
 * Flush the I-cache for a run of pages.  NOTE(review): the vma and nr
 * arguments are unused here — the local flusher operates on the whole
 * folio containing @page.
 */
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
		unsigned int nr)
{
	/* Nothing uses the VMA, so just pass the folio along */
	cacheop_on_each_cpu(local_flush_icache_folio, page_folio(page), 1);
}
249f26b2a56SPaul Mundt 
/* Flush caches for a signal trampoline written at @address. */
void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}
254f26b2a56SPaul Mundt 
/*
 * Derive the alias mask and alias count for a cache from its geometry:
 * the index bits above PAGE_SHIFT determine how many page colours
 * (aliases) the cache has.  Without an MMU there are no virtual
 * aliases, so the mask is forced to zero.
 */
static void compute_alias(struct cache_info *c)
{
#ifdef CONFIG_MMU
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
#else
	c->alias_mask = 0;
#endif
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}
26427d59ec1SPaul Mundt 
26527d59ec1SPaul Mundt static void __init emit_cache_params(void)
26627d59ec1SPaul Mundt {
26727d59ec1SPaul Mundt 	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
26827d59ec1SPaul Mundt 		boot_cpu_data.icache.ways,
26927d59ec1SPaul Mundt 		boot_cpu_data.icache.sets,
27027d59ec1SPaul Mundt 		boot_cpu_data.icache.way_incr);
27127d59ec1SPaul Mundt 	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
27227d59ec1SPaul Mundt 		boot_cpu_data.icache.entry_mask,
27327d59ec1SPaul Mundt 		boot_cpu_data.icache.alias_mask,
27427d59ec1SPaul Mundt 		boot_cpu_data.icache.n_aliases);
27527d59ec1SPaul Mundt 	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
27627d59ec1SPaul Mundt 		boot_cpu_data.dcache.ways,
27727d59ec1SPaul Mundt 		boot_cpu_data.dcache.sets,
27827d59ec1SPaul Mundt 		boot_cpu_data.dcache.way_incr);
27927d59ec1SPaul Mundt 	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
28027d59ec1SPaul Mundt 		boot_cpu_data.dcache.entry_mask,
28127d59ec1SPaul Mundt 		boot_cpu_data.dcache.alias_mask,
28227d59ec1SPaul Mundt 		boot_cpu_data.dcache.n_aliases);
28327d59ec1SPaul Mundt 
28427d59ec1SPaul Mundt 	/*
28527d59ec1SPaul Mundt 	 * Emit Secondary Cache parameters if the CPU has a probed L2.
28627d59ec1SPaul Mundt 	 */
28727d59ec1SPaul Mundt 	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
28827d59ec1SPaul Mundt 		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
28927d59ec1SPaul Mundt 			boot_cpu_data.scache.ways,
29027d59ec1SPaul Mundt 			boot_cpu_data.scache.sets,
29127d59ec1SPaul Mundt 			boot_cpu_data.scache.way_incr);
29227d59ec1SPaul Mundt 		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
29327d59ec1SPaul Mundt 			boot_cpu_data.scache.entry_mask,
29427d59ec1SPaul Mundt 			boot_cpu_data.scache.alias_mask,
29527d59ec1SPaul Mundt 			boot_cpu_data.scache.n_aliases);
29627d59ec1SPaul Mundt 	}
29727d59ec1SPaul Mundt }
29827d59ec1SPaul Mundt 
/*
 * Boot-time cache setup: compute alias parameters for all probed
 * caches, install the no-op region flushers as a safe default, then
 * hand off to the CPU-family specific init to hook up the real
 * implementations — unless the cache is disabled in CCR, in which case
 * the no-ops are kept.
 */
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef SH_CCR
	/* Check the cache-enable bit in the cache control register. */
	cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	/* Safe defaults until a family-specific init overrides them. */
	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.type == CPU_J2) {
		j2_cache_init();
	} else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		sh3_cache_init();

		/* SH7705 with the larger 32K D-cache needs its own init. */
		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		sh4_cache_init();

		/* SH-X3 / SH7786 layer additional SMP-aware flushers. */
		if ((boot_cpu_data.type == CPU_SH7786) ||
		    (boot_cpu_data.type == CPU_SHX3)) {
			shx3_cache_init();
		}
	}

skip:
	emit_cache_params();
}
355