xref: /linux/arch/parisc/include/asm/cacheflush.h (revision 36ec807b627b4c0a0a382f0ae48eac7187d14b2b)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2deae26bfSKyle McMartin #ifndef _PARISC_CACHEFLUSH_H
3deae26bfSKyle McMartin #define _PARISC_CACHEFLUSH_H
4deae26bfSKyle McMartin 
5deae26bfSKyle McMartin #include <linux/mm.h>
6210501aaSJohn David Anglin #include <linux/uaccess.h>
7b7d45818SJames Bottomley #include <asm/tlbflush.h>
8deae26bfSKyle McMartin 
9deae26bfSKyle McMartin /* The usual comment is "Caches aren't brain-dead on the <architecture>".
10deae26bfSKyle McMartin  * Unfortunately, that doesn't apply to PA-RISC. */
11deae26bfSKyle McMartin 
120a575497SHelge Deller #include <linux/jump_label.h>
130a575497SHelge Deller 
140a575497SHelge Deller DECLARE_STATIC_KEY_TRUE(parisc_has_cache);
150a575497SHelge Deller DECLARE_STATIC_KEY_TRUE(parisc_has_dcache);
160a575497SHelge Deller DECLARE_STATIC_KEY_TRUE(parisc_has_icache);
17deae26bfSKyle McMartin 
18deae26bfSKyle McMartin #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
19deae26bfSKyle McMartin 
20deae26bfSKyle McMartin void flush_user_icache_range_asm(unsigned long, unsigned long);
21deae26bfSKyle McMartin void flush_kernel_icache_range_asm(unsigned long, unsigned long);
22deae26bfSKyle McMartin void flush_user_dcache_range_asm(unsigned long, unsigned long);
23deae26bfSKyle McMartin void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
240adb24e0SJohn David Anglin void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
2539ade048SFabio M. De Francesco void flush_kernel_dcache_page_asm(const void *addr);
26deae26bfSKyle McMartin void flush_kernel_icache_page(void *);
27deae26bfSKyle McMartin 
28deae26bfSKyle McMartin /* Cache flush operations */
29deae26bfSKyle McMartin 
30deae26bfSKyle McMartin void flush_cache_all_local(void);
31deae26bfSKyle McMartin void flush_cache_all(void);
32deae26bfSKyle McMartin void flush_cache_mm(struct mm_struct *mm);
33deae26bfSKyle McMartin 
/*
 * Write back the kernel data cache for [start, start + size).
 *
 * Expands to a single plain statement; callers supply the trailing
 * semicolon.  The previous definition ended in a ';' inside the
 * expansion, which broke uses such as
 * "if (cond) flush_kernel_dcache_range(s, n); else ...".
 */
#define flush_kernel_dcache_range(start, size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))
36ef7cc35bSJames Bottomley 
37*72d95924SJohn David Anglin /* The only way to flush a vmap range is to flush whole cache */
38f358afc5SChristoph Hellwig #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
39316ec062SJohn David Anglin void flush_kernel_vmap_range(void *vaddr, int size);
40316ec062SJohn David Anglin void invalidate_kernel_vmap_range(void *vaddr, int size);
41deae26bfSKyle McMartin 
42*72d95924SJohn David Anglin void flush_cache_vmap(unsigned long start, unsigned long end);
437a92fc8bSAlexandre Ghiti #define flush_cache_vmap_early(start, end)	do { } while (0)
44*72d95924SJohn David Anglin void flush_cache_vunmap(unsigned long start, unsigned long end);
45deae26bfSKyle McMartin 
46e70bbca6SMatthew Wilcox (Oracle) void flush_dcache_folio(struct folio *folio);
47e70bbca6SMatthew Wilcox (Oracle) #define flush_dcache_folio flush_dcache_folio
482d4dc890SIlya Loginov #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
/* Flush the data cache for one page by flushing its containing folio. */
static inline void flush_dcache_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	flush_dcache_folio(folio);
}
53deae26bfSKyle McMartin 
/*
 * Serialize dcache flushing against concurrent updates of a mapping's
 * page cache by taking the i_pages xarray lock (irq-disabling variants).
 * Macro arguments are now parenthesized so an expression argument
 * cannot mis-bind against the '->' operator.
 */
#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&(mapping)->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&(mapping)->i_pages)
#define flush_dcache_mmap_lock_irqsave(mapping, flags)		\
		xa_lock_irqsave(&(mapping)->i_pages, flags)
#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)	\
		xa_unlock_irqrestore(&(mapping)->i_pages, flags)
60deae26bfSKyle McMartin 
61e70bbca6SMatthew Wilcox (Oracle) void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
62e70bbca6SMatthew Wilcox (Oracle) 		unsigned int nr);
63203b7b6aSMatthew Wilcox (Oracle) #define flush_icache_pages flush_icache_pages
64deae26bfSKyle McMartin 
/*
 * Make code written into [start, end) visible to instruction fetch:
 * write back the data cache for the range, then flush the instruction
 * cache for it (presumably dcache first so the icache refetches
 * up-to-date memory -- order preserved from the original).
 */
#define flush_icache_range(start, end) do {		\
	flush_kernel_dcache_range_asm(start, end);	\
	flush_kernel_icache_range_asm(start, end);	\
} while (0)
69deae26bfSKyle McMartin 
702de8b4ccSJohn David Anglin void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
712de8b4ccSJohn David Anglin 		unsigned long user_vaddr, void *dst, void *src, int len);
722de8b4ccSJohn David Anglin void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
732de8b4ccSJohn David Anglin 		unsigned long user_vaddr, void *dst, void *src, int len);
742de8b4ccSJohn David Anglin void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
752de8b4ccSJohn David Anglin 		unsigned long pfn);
76deae26bfSKyle McMartin void flush_cache_range(struct vm_area_struct *vma,
77deae26bfSKyle McMartin 		unsigned long start, unsigned long end);
78deae26bfSKyle McMartin 
79deae26bfSKyle McMartin #define ARCH_HAS_FLUSH_ANON_PAGE
802de8b4ccSJohn David Anglin void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
81deae26bfSKyle McMartin 
827438f363SIra Weiny #define ARCH_HAS_FLUSH_ON_KUNMAP
83*72d95924SJohn David Anglin void kunmap_flush_on_unmap(const void *addr);
84deae26bfSKyle McMartin 
85deae26bfSKyle McMartin #endif /* _PARISC_CACHEFLUSH_H */
86deae26bfSKyle McMartin 
87