xref: /linux/arch/powerpc/include/asm/cacheflush.h (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
/*
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>

/*
 * No cache flushing is required when address mappings are changed,
 * because the caches on PowerPCs are physically addressed.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_icache_page(struct page *page);
#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
extern void __flush_dcache_icache_phys(unsigned long physaddr);
#else
static inline void __flush_dcache_icache_phys(unsigned long physaddr)
{
	BUG();
}
#endif
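
/*
 * Illustrative usage sketch (not part of the original header): code that
 * stores instructions into memory must push the dirty D-cache lines out
 * and discard the stale I-cache lines before executing them.  A
 * hypothetical caller filling an instruction buffer 'dst' with 'len'
 * bytes from 'insns' might do:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */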

#ifdef CONFIG_PPC32
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbf(addr);
	mb();	/* sync */
}
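
/*
 * Illustrative usage sketch (hypothetical buffer 'buf', not from this
 * file): before a non-coherent device reads a buffer the CPU has just
 * written, the dirty lines must reach memory:
 *
 *	memcpy(buf, data, count);
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + count);
 *	... the device may now DMA from 'buf' ...
 */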

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbst(addr);
	mb();	/* sync */
}
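
/*
 * Sketch of the clean vs. flush distinction (assumed descriptor 'descr',
 * not from this file): when the CPU will keep reading the data after
 * handing it to a device, clean_dcache_range() writes the lines back
 * while leaving them valid in the cache:
 *
 *	descr->status = DESC_READY;
 *	clean_dcache_range((unsigned long)descr,
 *			   (unsigned long)descr + sizeof(*descr));
 */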

/*
 * Like above, but invalidate the D-cache without writing the blocks
 * back to memory.  This is used by the 8xx to invalidate the cache so
 * the PPC core doesn't get stale data from the CPM (no cache snooping
 * here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbi(addr);
	mb();	/* sync */
}
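
/*
 * Illustrative sketch (hypothetical receive buffer 'rx_buf' and handler
 * 'process_frame'): before the core reads data that the CPM or another
 * non-snooping master has deposited in memory, any stale cached copy
 * must be discarded first:
 *
 *	invalidate_dcache_range((unsigned long)rx_buf,
 *				(unsigned long)rx_buf + rx_len);
 *	process_frame(rx_buf, rx_len);
 */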

#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
extern void flush_dcache_range(unsigned long start, unsigned long stop);
extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
#endif

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
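
/*
 * Sketch of a typical caller (assumed variables, not from this file):
 * ptrace-style code writing a breakpoint instruction into another task's
 * text page goes through copy_to_user_page() so the I-cache is flushed
 * for the modified range:
 *
 *	kaddr = kmap(page);
 *	copy_to_user_page(vma, page, vaddr, kaddr + offset,
 *			  &bp_insn, sizeof(bp_insn));
 *	kunmap(page);
 */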

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_CACHEFLUSH_H */