xref: /linux/arch/sparc/include/asm/cacheflush_32.h (revision 06d07429858317ded2db7986113a9e0129cd599b)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2a439fe51SSam Ravnborg #ifndef _SPARC_CACHEFLUSH_H
3a439fe51SSam Ravnborg #define _SPARC_CACHEFLUSH_H
4a439fe51SSam Ravnborg 
5665f6402SMatthew Wilcox (Oracle) #include <linux/page-flags.h>
65d83d666SDavid S. Miller #include <asm/cachetlb_32.h>
7a439fe51SSam Ravnborg 
/*
 * User-space cache flush operations.  All of them dispatch through the
 * per-CPU-model sparc32_cachetlb_ops vector (see <asm/cachetlb_32.h>).
 */
#define flush_cache_all() \
	sparc32_cachetlb_ops->cache_all()
#define flush_cache_mm(mm) \
	sparc32_cachetlb_ops->cache_mm(mm)
/* Flushing for a dup_mm is the same as a full mm flush here. */
#define flush_cache_dup_mm(mm) \
	sparc32_cachetlb_ops->cache_mm(mm)
#define flush_cache_range(vma,start,end) \
	sparc32_cachetlb_ops->cache_range(vma, start, end)
/* The pfn argument is accepted for API compatibility but not passed on. */
#define flush_cache_page(vma,addr,pfn) \
	sparc32_cachetlb_ops->cache_page(vma, addr)
/* No separate I-cache range flush is performed on sparc32. */
#define flush_icache_range(start, end)		do { } while (0)
19a439fe51SSam Ravnborg 
/*
 * Copy data to/from a page mapped into user space.  Both directions
 * flush the cache for the page first and then do a plain memcpy();
 * note that no flush is done after the copy.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
	} while (0)
30a439fe51SSam Ravnborg 
/* Kernel-internal flush helpers, again dispatched via the ops vector. */
#define __flush_page_to_ram(addr) \
	sparc32_cachetlb_ops->page_to_ram(addr)
/* Flush instructions written for signal delivery (sig_insns op). */
#define flush_sig_insns(mm,insn_addr) \
	sparc32_cachetlb_ops->sig_insns(mm, insn_addr)
#define flush_page_for_dma(addr) \
	sparc32_cachetlb_ops->page_for_dma(addr)

/* Implemented in arch code: push page/folio contents back to RAM. */
void sparc_flush_page_to_ram(struct page *page);
void sparc_flush_folio_to_ram(struct folio *folio);
40a439fe51SSam Ravnborg 
/* This arch provides a real flush_dcache_page() implementation. */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_folio(folio)		sparc_flush_folio_to_ram(folio)
/* Flush the D-cache for one page by flushing its containing folio. */
static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
/* The dcache mmap lock hooks are no-ops on sparc32. */
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
49a439fe51SSam Ravnborg 
/* vmap/vunmap flush the entire cache; the early-boot variant is a no-op. */
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		flush_cache_all()
53a439fe51SSam Ravnborg 
/* When a context switch happens we must flush all user windows so that
 * the windows of the current process are flushed onto its stack. This
 * way the windows are all clean for the next process and the stack
 * frames are up to date.
 *
 * All three are implemented in arch assembly/C elsewhere.
 */
void flush_user_windows(void);
void kill_user_windows(void);
void flushw_all(void);
62d550bbd4SDavid Howells 
63a439fe51SSam Ravnborg #endif /* _SPARC_CACHEFLUSH_H */
64