xref: /linux/arch/sh/include/asm/cacheflush.h (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2f15cbe6fSPaul Mundt #ifndef __ASM_SH_CACHEFLUSH_H
3f15cbe6fSPaul Mundt #define __ASM_SH_CACHEFLUSH_H
4f15cbe6fSPaul Mundt 
537443ef3SPaul Mundt #include <linux/mm.h>
6f9bd71f2SPaul Mundt 
7f9bd71f2SPaul Mundt /*
8f9bd71f2SPaul Mundt  * Cache flushing:
9f9bd71f2SPaul Mundt  *
10f9bd71f2SPaul Mundt  *  - flush_cache_all() flushes entire cache
11f9bd71f2SPaul Mundt  *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
13f9bd71f2SPaul Mundt  *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
14f9bd71f2SPaul Mundt  *  - flush_cache_range(vma, start, end) flushes a range of pages
15f9bd71f2SPaul Mundt  *
16157efa29SMatthew Wilcox (Oracle)  *  - flush_dcache_folio(folio) flushes(wback&invalidates) a folio for dcache
17f9bd71f2SPaul Mundt  *  - flush_icache_range(start, end) flushes(invalidates) a range for icache
18157efa29SMatthew Wilcox (Oracle)  *  - flush_icache_pages(vma, pg, nr) flushes(invalidates) pages for icache
19f9bd71f2SPaul Mundt  *  - flush_cache_sigtramp(vaddr) flushes the signal trampoline
20f9bd71f2SPaul Mundt  */
21f26b2a56SPaul Mundt extern void (*local_flush_cache_all)(void *args);
22f26b2a56SPaul Mundt extern void (*local_flush_cache_mm)(void *args);
23f26b2a56SPaul Mundt extern void (*local_flush_cache_dup_mm)(void *args);
24f26b2a56SPaul Mundt extern void (*local_flush_cache_page)(void *args);
25f26b2a56SPaul Mundt extern void (*local_flush_cache_range)(void *args);
26157efa29SMatthew Wilcox (Oracle) extern void (*local_flush_dcache_folio)(void *args);
27f26b2a56SPaul Mundt extern void (*local_flush_icache_range)(void *args);
28157efa29SMatthew Wilcox (Oracle) extern void (*local_flush_icache_folio)(void *args);
29f26b2a56SPaul Mundt extern void (*local_flush_cache_sigtramp)(void *args);
30f26b2a56SPaul Mundt 
/* Do-nothing stub assigned to the local_flush_* function pointers above on
 * CPUs whose cache type needs no action for that operation. */
static inline void cache_noop(void *args) { }
32f9bd71f2SPaul Mundt 
33f9bd71f2SPaul Mundt extern void (*__flush_wback_region)(void *start, int size);
34f9bd71f2SPaul Mundt extern void (*__flush_purge_region)(void *start, int size);
35f9bd71f2SPaul Mundt extern void (*__flush_invalidate_region)(void *start, int size);
36f15cbe6fSPaul Mundt 
37f26b2a56SPaul Mundt extern void flush_cache_all(void);
38f26b2a56SPaul Mundt extern void flush_cache_mm(struct mm_struct *mm);
39f26b2a56SPaul Mundt extern void flush_cache_dup_mm(struct mm_struct *mm);
40f26b2a56SPaul Mundt extern void flush_cache_page(struct vm_area_struct *vma,
41f26b2a56SPaul Mundt 				unsigned long addr, unsigned long pfn);
42f26b2a56SPaul Mundt extern void flush_cache_range(struct vm_area_struct *vma,
43f26b2a56SPaul Mundt 				 unsigned long start, unsigned long end);
442d4dc890SIlya Loginov #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
45157efa29SMatthew Wilcox (Oracle) void flush_dcache_folio(struct folio *folio);
46157efa29SMatthew Wilcox (Oracle) #define flush_dcache_folio flush_dcache_folio
/*
 * Write back and invalidate the dcache lines covering a single page by
 * handing the page's containing folio to flush_dcache_folio().
 */
static inline void flush_dcache_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	flush_dcache_folio(folio);
}
51157efa29SMatthew Wilcox (Oracle) 
52f26b2a56SPaul Mundt extern void flush_icache_range(unsigned long start, unsigned long end);
53952ec41cSChristoph Hellwig #define flush_icache_user_range flush_icache_range
54157efa29SMatthew Wilcox (Oracle) void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
55157efa29SMatthew Wilcox (Oracle) 		unsigned int nr);
56203b7b6aSMatthew Wilcox (Oracle) #define flush_icache_pages flush_icache_pages
57f26b2a56SPaul Mundt extern void flush_cache_sigtramp(unsigned long address);
58f26b2a56SPaul Mundt 
59f26b2a56SPaul Mundt struct flusher_data {
60f26b2a56SPaul Mundt 	struct vm_area_struct *vma;
61f26b2a56SPaul Mundt 	unsigned long addr1, addr2;
62f26b2a56SPaul Mundt };
63f26b2a56SPaul Mundt 
64c0fe478dSPaul Mundt #define ARCH_HAS_FLUSH_ANON_PAGE
65c0fe478dSPaul Mundt extern void __flush_anon_page(struct page *page, unsigned long);
66c0fe478dSPaul Mundt 
flush_anon_page(struct vm_area_struct * vma,struct page * page,unsigned long vmaddr)67c0fe478dSPaul Mundt static inline void flush_anon_page(struct vm_area_struct *vma,
68c0fe478dSPaul Mundt 				   struct page *page, unsigned long vmaddr)
69c0fe478dSPaul Mundt {
70c0fe478dSPaul Mundt 	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
71c0fe478dSPaul Mundt 		__flush_anon_page(page, vmaddr);
72c0fe478dSPaul Mundt }
73f358afc5SChristoph Hellwig 
74f358afc5SChristoph Hellwig #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
/* Write back the cache lines covering a kernel vmap'd range; delegates to
 * the CPU-specific __flush_wback_region() implementation. */
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	__flush_wback_region(addr, size);
}
/* Invalidate the cache lines covering a kernel vmap'd range; delegates to
 * the CPU-specific __flush_invalidate_region() implementation. */
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	__flush_invalidate_region(addr, size);
}
83c0fe478dSPaul Mundt 
84f15cbe6fSPaul Mundt extern void copy_to_user_page(struct vm_area_struct *vma,
85f15cbe6fSPaul Mundt 	struct page *page, unsigned long vaddr, void *dst, const void *src,
86f15cbe6fSPaul Mundt 	unsigned long len);
87f15cbe6fSPaul Mundt 
88f15cbe6fSPaul Mundt extern void copy_from_user_page(struct vm_area_struct *vma,
89f15cbe6fSPaul Mundt 	struct page *page, unsigned long vaddr, void *dst, const void *src,
90f15cbe6fSPaul Mundt 	unsigned long len);
91f15cbe6fSPaul Mundt 
92b07f6b32SPaul Mundt #define flush_cache_vmap(start, end)		local_flush_cache_all(NULL)
937a92fc8bSAlexandre Ghiti #define flush_cache_vmap_early(start, end)	do { } while (0)
94b07f6b32SPaul Mundt #define flush_cache_vunmap(start, end)		local_flush_cache_all(NULL)
95f15cbe6fSPaul Mundt 
967fbb2d3bSPaul Mundt #define flush_dcache_mmap_lock(mapping)		do { } while (0)
977fbb2d3bSPaul Mundt #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
987fbb2d3bSPaul Mundt 
992739742cSPaul Mundt void kmap_coherent_init(void);
1002739742cSPaul Mundt void *kmap_coherent(struct page *page, unsigned long addr);
1010906a3adSPaul Mundt void kunmap_coherent(void *kvaddr);
1022739742cSPaul Mundt 
10355661fc1SPaul Mundt #define PG_dcache_clean	PG_arch_1
104dde5e3ffSPaul Mundt 
105ecba1060SPaul Mundt void cpu_cache_init(void);
106ecba1060SPaul Mundt 
107*fcd9a892SGeert Uytterhoeven void __weak l2_cache_init(void);
108*fcd9a892SGeert Uytterhoeven 
109*fcd9a892SGeert Uytterhoeven void __weak j2_cache_init(void);
110*fcd9a892SGeert Uytterhoeven void __weak sh2_cache_init(void);
111*fcd9a892SGeert Uytterhoeven void __weak sh2a_cache_init(void);
112*fcd9a892SGeert Uytterhoeven void __weak sh3_cache_init(void);
113*fcd9a892SGeert Uytterhoeven void __weak shx3_cache_init(void);
114*fcd9a892SGeert Uytterhoeven void __weak sh4_cache_init(void);
115*fcd9a892SGeert Uytterhoeven void __weak sh7705_cache_init(void);
116*fcd9a892SGeert Uytterhoeven 
117*fcd9a892SGeert Uytterhoeven void __weak sh4__flush_region_init(void);
118*fcd9a892SGeert Uytterhoeven 
/*
 * Return the address that cache operations should use for @vaddr: in
 * 29-bit mode translate it through CAC_ADDR() (presumably to its
 * cacheable alias — see CAC_ADDR's definition), otherwise return it
 * unchanged.
 */
static inline void *sh_cacheop_vaddr(void *vaddr)
{
	if (!__in_29bit_mode())
		return vaddr;

	return (void *)CAC_ADDR((unsigned long)vaddr);
}
12547fcae0dSChristoph Hellwig 
126f15cbe6fSPaul Mundt #endif /* __ASM_SH_CACHEFLUSH_H */
127