/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Regents of the University of California
 */

#ifndef _ASM_RISCV_CACHEFLUSH_H
#define _ASM_RISCV_CACHEFLUSH_H

#include <linux/mm.h>

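/*
 * fence.i orders the local hart's instruction fetches after all prior data
 * stores, i.e. it flushes this hart's own instruction cache view.
 */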
static inline void local_flush_icache_all(void)
{
	asm volatile ("fence.i" ::: "memory");
}

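/*
 * fence.i cannot target an address range, so a ranged local flush degrades
 * to flushing the whole local instruction cache.
 */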
static inline void local_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	local_flush_icache_all();
}

#define PG_dcache_clean PG_arch_1

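/*
 * PG_dcache_clean records that the instruction cache has already been
 * synchronized for this folio; clearing it forces a fresh flush the next
 * time the folio is mapped executable.
 */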
static inline void flush_dcache_folio(struct folio *folio)
{
	if (test_bit(PG_dcache_clean, &folio->flags))
		clear_bit(PG_dcache_clean, &folio->flags);
}
#define flush_dcache_folio flush_dcache_folio
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1

static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}

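/*
 * After the kernel writes instructions into a user page, only executable
 * mappings need the instruction caches brought back in sync.
 */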
#define flush_icache_user_page(vma, pg, addr, len)	\
do {							\
	if (vma->vm_flags & VM_EXEC)			\
		flush_icache_mm(vma->vm_mm, 0);		\
} while (0)

#ifdef CONFIG_64BIT
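/*
 * new_vmalloc is a bitmap with one bit per possible CPU: a set bit asks that
 * CPU to emit an sfence.vma on its next kernel exception entry (see
 * handle_exception()), so new vmalloc/module mappings become visible without
 * taking a vmalloc fault.
 */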
extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
extern char _end[];
#define flush_cache_vmap flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (is_vmalloc_or_module_addr((void *)start)) {
		int i;

		/*
		 * We don't care if a CPU concurrently resets this value, since
		 * the only place that can happen is in handle_exception(),
		 * where an sfence.vma is emitted.
		 */
		for (i = 0; i < ARRAY_SIZE(new_vmalloc); ++i)
			new_vmalloc[i] = -1ULL;
	}
}
#define flush_cache_vmap_early(start, end)	local_flush_tlb_kernel_range(start, end)
#endif /* CONFIG_64BIT */

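/*
 * With !CONFIG_SMP only the local hart exists, so a local fence.i suffices;
 * the SMP variants must also fence the remote harts that have run the mm.
 */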
#ifndef CONFIG_SMP

#define flush_icache_all() local_flush_icache_all()
#define flush_icache_mm(mm, local) flush_icache_all()

#else /* CONFIG_SMP */

void flush_icache_all(void);
void flush_icache_mm(struct mm_struct *mm, bool local);

#endif /* CONFIG_SMP */

/*
 * RISC-V doesn't have an instruction to flush parts of the instruction cache,
 * so instead we just flush the whole thing.
 */
#define flush_icache_range flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_icache_all();
}

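/*
 * Cache-block sizes probed for the Zicbom (management), Zicboz (zero) and
 * Zicbop (prefetch) block operation extensions.
 */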
extern unsigned int riscv_cbom_block_size;
extern unsigned int riscv_cboz_block_size;
extern unsigned int riscv_cbop_block_size;
void riscv_init_cbo_blocksizes(void);

#ifdef CONFIG_RISCV_DMA_NONCOHERENT
void riscv_noncoherent_supported(void);
void __init riscv_set_dma_cache_alignment(void);
#else
static inline void riscv_noncoherent_supported(void) {}
static inline void riscv_set_dma_cache_alignment(void) {}
#endif

/*
 * Bits in sys_riscv_flush_icache()'s flags argument.
 */
#define SYS_RISCV_FLUSH_ICACHE_LOCAL	1UL
#define SYS_RISCV_FLUSH_ICACHE_ALL	(SYS_RISCV_FLUSH_ICACHE_LOCAL)

#include <asm-generic/cacheflush.h>

#endif /* _ASM_RISCV_CACHEFLUSH_H */