/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/string.h>
#include <asm/cache.h>

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
void flush_dcache_folio(struct folio *);
#define flush_dcache_folio flush_dcache_folio

#define flush_cache_mm(mm)			dcache_wbinv_all()
#define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
#define flush_cache_dup_mm(mm)			cache_wbinv_all()

#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		cache_wbinv_all();
}

/*
 * If current's mm is not vma's mm, cache_wbinv_range(start, end) would be
 * broken: the ranged operation works on virtual addresses, which only make
 * sense in the currently live address space.  Use cache_wbinv_all() here
 * for now; this should be improved in the future (see the illustrative
 * sketch below).
 */
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
#define flush_cache_vmap(start, end)		cache_wbinv_all()
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		cache_wbinv_all()

#define flush_icache_range(start, end)		cache_wbinv_range(start, end)
#define flush_icache_mm_range(mm, start, end)	cache_wbinv_range(start, end)
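/*
 * Illustrative only, not part of this ABI header: a minimal sketch of the
 * direction hinted at in the comment above -- do a ranged write-back and
 * invalidate when the VMA belongs to the currently running mm, and fall
 * back to the global flush otherwise.  The helper name and the mm check
 * are assumptions for illustration; the real flush_cache_range() is
 * defined out of line elsewhere in the arch code and currently flushes
 * everything.
 */
#if 0
static inline void example_flush_cache_range(struct vm_area_struct *vma,
					     unsigned long start,
					     unsigned long end)
{
	/*
	 * Ranged cache ops walk virtual addresses, so they are only
	 * meaningful against the address space that is currently live.
	 */
	if (current->mm == vma->vm_mm)
		cache_wbinv_range(start, end);
	else
		cache_wbinv_all();
}
#endif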
#define flush_icache_deferred(mm)		do {} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
	cache_wbinv_all(); \
} while (0)

#endif /* __ABI_CSKY_CACHEFLUSH_H */
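
/*
 * Illustrative only: a hedged sketch of how a caller (modelled on the
 * generic access_process_vm()/ptrace write path) might use
 * copy_to_user_page().  The function name and parameters below are
 * hypothetical, and kmap_local_page() would come from <linux/highmem.h>.
 * The point is the asymmetry above: copy_from_user_page() is a plain
 * memcpy(), while copy_to_user_page() must also write back and invalidate
 * so no stale data or instructions survive the modification.
 */
#if 0
static void example_write_remote_page(struct vm_area_struct *vma,
				      struct page *page, unsigned long vaddr,
				      const void *buf, int len)
{
	void *maddr = kmap_local_page(page);

	/* memcpy() into the kernel alias, then flush via the macro. */
	copy_to_user_page(vma, page, vaddr,
			  maddr + offset_in_page(vaddr), buf, len);

	kunmap_local(maddr);
}
#endif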