/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

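/*
 * Per-family cache flushing hooks.  These default to no-ops and are
 * replaced with real implementations by the *_cache_init() routines
 * invoked from cpu_cache_init() below.
 */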
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

static inline void noop__flush_region(void *start, int size)
{
}

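/*
 * Run a cache operation on every online CPU.  smp_call_function() only
 * IPIs the *other* CPUs, so the local CPU must call @func by hand;
 * preemption is disabled so the local call cannot migrate mid-flush.
 */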
static inline void cacheop_on_each_cpu(void (*func)(void *info), void *info,
				       int wait)
{
	preempt_disable();
	smp_call_function(func, info, wait);
	func(info);
	preempt_enable();
}

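/*
 * Copy data into a user page through a kernel alias.  On CPUs with
 * D-cache aliases, a mapped and clean page is written through a
 * coherent kmap of the user's cache colour; otherwise we write through
 * the plain kernel mapping and set PG_dcache_dirty so the stale alias
 * is purged later (see __update_cache()).  Executable VMAs also get
 * the affected page flushed from the I-cache.
 */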
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

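/*
 * Illustrative sketch only (not built): roughly how a ptrace-style
 * text poke would reach copy_to_user_page() above.  The helper name,
 * opcode value and page_address() shortcut are hypothetical; the real
 * caller on SH is the generic access_process_vm() path.
 */
#if 0
static void example_poke_text(struct vm_area_struct *vma, struct page *page,
			      unsigned long vaddr)
{
	u16 insn = 0xc33c;	/* hypothetical breakpoint encoding */
	void *kdst = page_address(page) + (vaddr & ~PAGE_MASK);

	copy_to_user_page(vma, page, vaddr, kdst, &insn, sizeof(insn));
}
#endif
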
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}

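/*
 * COW/migration page copy.  A mapped, clean source page is read
 * through a coherent kmap of the user's cache colour so that no stale
 * alias lines are copied; the destination is purged afterwards if its
 * kernel address can alias the user address in the D-cache.
 */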
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

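/*
 * Called when a PTE is installed or updated.  If the kernel dirtied
 * the page through its own mapping while it was unmapped in userspace
 * (PG_dcache_dirty set by the helpers above), purge the kernel alias
 * now so the new user mapping sees current data.
 */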
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	/* Only translate the pfn once we know it is valid. */
	if (pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_purge_region((void *)addr, PAGE_SIZE);
		}
	}
}

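/*
 * Bring an anonymous page into sync with its kernel mapping, for
 * get_user_pages() callers about to touch it via page_address().
 * A mapped, clean page only needs its user-colour alias written
 * back, which the coherent kmap teardown currently does for us;
 * otherwise purge the kernel alias directly.
 */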
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}

void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

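/*
 * Illustrative sketch only (not built): anything that writes
 * instructions through the D-cache, e.g. a kprobes-style patcher,
 * must call flush_icache_range() above before the new opcode may be
 * fetched.  The helper below is hypothetical.
 */
#if 0
static void example_patch_text(unsigned long addr, u16 insn)
{
	*(u16 *)addr = insn;	/* lands in the D-cache */
	flush_icache_range(addr, addr + sizeof(insn));
}
#endif
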
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

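/*
 * Worked example for compute_alias() above, assuming a 16KiB
 * direct-mapped D-cache with 32-byte lines (512 sets, entry_shift of
 * 5) and 4KiB pages:
 *
 *	alias_mask = ((512 - 1) << 5) & ~(4096 - 1)
 *		   = 0x3fe0 & 0xfffff000 = 0x3000
 *	n_aliases  = (0x3000 >> 12) + 1 = 4
 *
 * i.e. a physical page may land in any of four cache colours, one per
 * page-sized slice of the way.
 */
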
static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

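/*
 * Probe the cache configuration, install the family-specific flush
 * routines and report the result.  The *_cache_init() declarations
 * below are __weak so the kernel still links when a family's cache
 * code is not built; only the branch matching the probed CPU family
 * is ever taken.
 */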
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef CCR
	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case, so we can
	 * just keep the noop functions in local_flush_*() and __flush_*().
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}