xref: /linux/arch/riscv/include/asm/cacheflush.h (revision 2d69fbf3d01a5b71e98137e2406d4087960c512e)
150acfb2bSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
2fab957c1SPalmer Dabbelt /*
3fab957c1SPalmer Dabbelt  * Copyright (C) 2015 Regents of the University of California
4fab957c1SPalmer Dabbelt  */
5fab957c1SPalmer Dabbelt 
6fab957c1SPalmer Dabbelt #ifndef _ASM_RISCV_CACHEFLUSH_H
7fab957c1SPalmer Dabbelt #define _ASM_RISCV_CACHEFLUSH_H
8fab957c1SPalmer Dabbelt 
9*2d69fbf3SPaul Walmsley #include <linux/mm.h>
10fab957c1SPalmer Dabbelt 
/*
 * This header implements flush_dcache_page() below, so it must
 * advertise that fact to core mm code (see
 * Documentation/core-api/cachetlb.rst).  Defining this as 0 while also
 * providing flush_dcache_page() is inconsistent: generic code keys the
 * D-cache aliasing/coherency handling off this macro.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
12*2d69fbf3SPaul Walmsley 
/*
 * The cache doesn't need to be flushed when TLB entries change because
 * the cache is mapped to physical memory, not virtual memory.
 */
/* No-op: physically-addressed caches need no flush on mapping changes. */
static inline void flush_cache_all(void)
{
}
20*2d69fbf3SPaul Walmsley 
/* No-op: nothing to do when tearing down an address space. */
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
24*2d69fbf3SPaul Walmsley 
/* No-op: nothing to do when an address space is duplicated (fork). */
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
28*2d69fbf3SPaul Walmsley 
/* No-op: nothing to do before changing mappings in [start, end). */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
34*2d69fbf3SPaul Walmsley 
/* No-op: nothing to do before changing a single page's mapping. */
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
40*2d69fbf3SPaul Walmsley 
/* No-op: no D-cache aliasing, so no lock is needed around mmap updates. */
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
44*2d69fbf3SPaul Walmsley 
/* No-op counterpart of flush_dcache_mmap_lock(). */
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
48*2d69fbf3SPaul Walmsley 
/*
 * No-op here: I-cache/D-cache synchronization for executable pages is
 * handled via flush_icache_*() and the PG_dcache_clean tracking instead.
 */
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
53*2d69fbf3SPaul Walmsley 
/* No-op: nothing to do when new vmalloc/ioremap mappings are created. */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
57*2d69fbf3SPaul Walmsley 
/* No-op: nothing to do when vmalloc/ioremap mappings are removed. */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
61*2d69fbf3SPaul Walmsley 
/*
 * Copy data into a user page (e.g. ptrace writing a breakpoint), then
 * synchronize the I-cache since the newly written bytes may be executed.
 * Reads from a user page need no cache maintenance.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
69fab957c1SPalmer Dabbelt 
/*
 * Synchronize this hart's instruction fetches with prior stores.
 * fence.i acts only on the executing hart; the "memory" clobber stops
 * the compiler from reordering memory accesses across the fence.
 */
static inline void local_flush_icache_all(void)
{
	asm volatile ("fence.i" ::: "memory");
}
74fab957c1SPalmer Dabbelt 
/* Arch page flag 1 tracks "I-cache is in sync with this page's data". */
#define PG_dcache_clean PG_arch_1

/*
 * Note that the page's data has (potentially) changed: clear
 * PG_dcache_clean so the next executable use of the page triggers an
 * I-cache flush.  The test_bit() check avoids a needless atomic RMW
 * when the bit is already clear.
 */
static inline void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_dcache_clean, &page->flags))
		clear_bit(PG_dcache_clean, &page->flags);
}
8208f051edSAndrew Waterman 
8308f051edSAndrew Waterman /*
8408f051edSAndrew Waterman  * RISC-V doesn't have an instruction to flush parts of the instruction cache,
8508f051edSAndrew Waterman  * so instead we just flush the whole thing.
8608f051edSAndrew Waterman  */
/* Ranged variants degrade to a full I-cache flush (see comment above). */
#define flush_icache_range(start, end) flush_icache_all()
#define flush_icache_user_range(vma, pg, addr, len) flush_icache_all()

#ifndef CONFIG_SMP

/* UP: the local fence.i covers the only hart; no remote work needed. */
#define flush_icache_all() local_flush_icache_all()
#define flush_icache_mm(mm, local) flush_icache_all()

#else /* CONFIG_SMP */

/* SMP: declared here, implemented out of line (must reach other harts). */
void flush_icache_all(void);
void flush_icache_mm(struct mm_struct *mm, bool local);

#endif /* CONFIG_SMP */
101fab957c1SPalmer Dabbelt 
102921ebd8fSAndrew Waterman /*
103921ebd8fSAndrew Waterman  * Bits in sys_riscv_flush_icache()'s flags argument.
104921ebd8fSAndrew Waterman  */
105921ebd8fSAndrew Waterman #define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
106921ebd8fSAndrew Waterman #define SYS_RISCV_FLUSH_ICACHE_ALL   (SYS_RISCV_FLUSH_ICACHE_LOCAL)
107921ebd8fSAndrew Waterman 
108fab957c1SPalmer Dabbelt #endif /* _ASM_RISCV_CACHEFLUSH_H */
109