xref: /linux/arch/sh/include/asm/cacheflush.h (revision 0d051d90bb08b516b9d6c30d25f83d3c6b5b1c1d)
1 #ifndef __ASM_SH_CACHEFLUSH_H
2 #define __ASM_SH_CACHEFLUSH_H
3 
4 #ifdef __KERNEL__
5 
6 #include <linux/mm.h>
7 #include <cpu/cacheflush.h>
8 
/*
 * sh provides its own flush_anon_page(): anonymous pages need explicit
 * flushing when the D-cache has aliases (see the n_aliases check below).
 */
#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *page, unsigned long);
11 
12 static inline void flush_anon_page(struct vm_area_struct *vma,
13 				   struct page *page, unsigned long vmaddr)
14 {
15 	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
16 		__flush_anon_page(page, vmaddr);
17 }
18 
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
/*
 * Flush a page the kernel has written through its own mapping.  On sh
 * this is the same operation as flush_dcache_page(), so just delegate.
 */
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_dcache_page(page);
}
24 
/*
 * Out-of-line copies between a kernel buffer and a user page.
 * NOTE(review): presumably these also perform the cache maintenance the
 * copy requires (hence the arch-specific versions) — confirm against
 * the out-of-line implementations.
 */
extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);
32 
/*
 * No per-range flush is attempted for vmap/vunmap changes; the whole
 * cache is flushed instead (conservative but always correct).
 */
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

/* The dcache mmap lock/unlock hooks are no-ops on sh. */
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
38 
/* Temporary kernel mappings of user pages that avoid cache aliasing. */
void kmap_coherent_init(void);
void *kmap_coherent(struct page *page, unsigned long addr);
void kunmap_coherent(void);

/*
 * Arch-private page flag; by its name, presumably marks a page whose
 * D-cache lines may be dirty — confirm against the mm/cache code.
 */
#define PG_dcache_dirty	PG_arch_1

/* One-time cache subsystem initialization at boot. */
void cpu_cache_init(void);
46 
47 #endif /* __KERNEL__ */
48 #endif /* __ASM_SH_CACHEFLUSH_H */
49