/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/string.h>
#include <asm/cache.h>

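/*
 * flush_dcache_page() is called by generic mm/fs code when the kernel
 * writes to or reads from a page-cache page that may also be mapped
 * into user space, so the D-cache can be brought back in sync with the
 * user mappings of that page.
 */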
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

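/*
 * Address-space wide and single-page flushes: abiv1 cannot reliably
 * flush another mm by virtual range (see the comment above
 * flush_cache_range() below), so these fall back to writing back and
 * invalidating the whole cache.
 */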
#define flush_cache_mm(mm)			dcache_wbinv_all()
#define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
#define flush_cache_dup_mm(mm)			cache_wbinv_all()

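/*
 * flush_kernel_dcache_page() is called after the kernel has modified a
 * user page through its kernel mapping (kmap), e.g. by PIO block
 * drivers, so the dirty lines are pushed out before user space looks
 * at the page again.
 */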
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);

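/*
 * flush_dcache_mmap_lock()/unlock() are taken by generic VM code around
 * updates to a file's mappings that a flush_dcache_page() implementation
 * may walk; here they map to the address_space's i_pages xarray lock,
 * IRQ-disabled variant.
 */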
#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)

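/*
 * Called around I/O to memory that the kernel also accesses through a
 * vmap/vm_map_ram alias; abiv1 has no way to operate on just the given
 * range, so both simply write back and invalidate the whole D-cache.
 */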
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}

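/*
 * flush_anon_page() is called when the kernel touches an anonymous page
 * through its kernel mapping (e.g. via get_user_pages()) while user
 * space may still have it mapped; any anonymous page costs a full
 * write-back and invalidate here.
 */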
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		cache_wbinv_all();
}

/*
 * cache_wbinv_range(start, end) is broken when current->mm is not
 * vma->vm_mm, because the range would be flushed in the wrong address
 * space. flush_cache_range() therefore falls back to cache_wbinv_all();
 * this should be improved in the future.
 */
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
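/*
 * flush_cache_vmap()/flush_cache_vunmap() bracket the creation and
 * teardown of vmalloc/vmap mappings; as above, the whole cache is
 * written back and invalidated rather than just the affected range.
 */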
#define flush_cache_vmap(start, end)		cache_wbinv_all()
#define flush_cache_vunmap(start, end)		cache_wbinv_all()

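/*
 * flush_icache_range() must be called after the kernel writes
 * instructions into memory (module loading, kprobes, etc.) so the
 * I-cache sees the new code. An illustrative sketch, not part of this
 * header: a hypothetical text-patching helper would do roughly
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */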
#define flush_icache_page(vma, page)		do {} while (0)
#define flush_icache_range(start, end)		cache_wbinv_range(start, end)

#define flush_icache_user_range(vma, page, addr, len) \
	flush_dcache_page(page)

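/*
 * copy_from_user_page()/copy_to_user_page() are used by
 * access_process_vm() (ptrace and friends) to copy data to or from a
 * user page through a kernel mapping. The write path pays a full
 * write-back and invalidate so the target process's D-cache and I-cache
 * observe the new data.
 */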
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
	cache_wbinv_all(); \
} while (0)

#endif /* __ABI_CSKY_CACHEFLUSH_H */