/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>

#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
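
/*
 * Illustrative only (the constants below are assumptions, not taken
 * from this header): with 4KiB pages (PAGE_SHIFT = 12) and
 * SHMLBA = 4 * PAGE_SIZE, as on aliasing VIPT caches,
 * CACHE_COLOUR(0x40003000) = 0x3000 >> 12 = 3, the last of the four
 * possible colours.
 */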

/*
 *	Cache Model
 *	===========
 *
 *	Each selected CPU below either defines _CACHE (when it is the
 *	only cache model built in, so the methods can be called
 *	directly) or MULTI_CACHE (when calls must be indirected through
 *	the cpu_cache function table).
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
    defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE fa
# endif
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE mohawk
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
#  define MULTI_CACHE 1
//# else
//#  define _CACHE v6
//# endif
#endif

#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
#  define MULTI_CACHE 1
//# else
//#  define _CACHE v7
//# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1
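
/*
 * A sketch of the deferred-flush protocol, pieced together from memory
 * of arch/arm/mm/flush.c and arch/arm/mm/fault-armv.c (treat the exact
 * call sites as an approximation):
 *
 *	set_bit(PG_dcache_dirty, &page->flags);		// writer defers
 *	...
 *	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
 *		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 */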

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page at kaddr is
 *		written back.
 *		- kaddr  - page address
 *		- size   - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */

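/*
 * Illustrative only (a sketch, not part of this header): making freshly
 * written instructions visible to the instruction stream pairs a D-cache
 * clean with an I-cache invalidate over the same range, which is what
 * the coherent_*_range methods provide, e.g.
 *
 *	memcpy(code, insns, len);
 *	flush_icache_range((unsigned long)code, (unsigned long)code + len);
 */
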
struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
};

struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

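/*
 * With MULTI_CACHE, cpu_cache is populated once at boot from the
 * matched processor's method table; roughly (see setup_processor() in
 * arch/arm/kernel/setup.c, the exact form may differ):
 *
 *	cpu_cache = *list->cache;
 *
 * after which every wrapper below dispatches through the table.
 */
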
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
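
/*
 * __glue() (asm/glue.h) simply pastes the two names together, so with,
 * say, _CACHE defined as v4wb, __cpuc_flush_kern_all resolves at
 * compile time to v4wb_flush_kern_cache_all with no indirection.
 */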

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			__glue(_CACHE,_dma_map_area)
#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif

#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

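/*
 * An outer (L2) cache driver fills this table in at init time; a sketch
 * of what arch/arm/mm/cache-l2x0.c does (names from that driver):
 *
 *	outer_cache.inv_range = l2x0_inv_range;
 *	outer_cache.clean_range = l2x0_clean_range;
 *	outer_cache.flush_range = l2x0_flush_range;
 */
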
static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
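
/*
 * Typical (sketched, not from this file) use of copy_to_user_page() is
 * ptrace poking a breakpoint into another task's text:
 *
 *	copy_to_user_page(vma, page, addr, kaddr, &insn, sizeof(insn));
 *
 * The implementation must leave the I-cache and D-cache coherent for
 * the written range (see Documentation/cachetlb.txt).
 */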

/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
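
/*
 * A sketch of how userspace gets here: JITs and the like issue the
 * ARM-private cacheflush syscall after writing instructions, e.g.
 *
 *	syscall(__ARM_NR_cacheflush, code, code + len, 0);
 *
 * and the kernel services it with flush_cache_user_range().
 */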

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

static inline void __flush_icache_all(void)
{
#ifdef CONFIG_ARM_ERRATA_411920
	extern void v6_icache_inval_all(void);
	v6_icache_inval_all();
#else
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    :
	    : "r" (0));
#endif
}
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
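
/*
 * Only aliasing cache types need work in the two helpers above: on a
 * non-aliasing cache the vmap alias and the direct mapping hit the same
 * lines.  As written, both helpers perform the same full
 * clean+invalidate of the D-cache over the region.
 */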

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	/* highmem pages are always flushed upon kunmap already */
	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (e.g., via vmap,
 * vmalloc, ioremap, etc.) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif