xref: /linux/arch/parisc/include/asm/cacheflush.h (revision ef7cc35b0ee03431731186320b18e5da585341ff)
#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Internal implementation */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void); /* flushes data-cache only (all processors) */
void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
#else
#define flush_data_cache() flush_data_cache_local(NULL)
#define flush_instruction_cache() flush_instruction_cache_local(NULL)
#endif
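
/*
 * Note (illustrative, not part of the original header): the _local
 * variants take an otherwise unused void * so they can double as
 * per-CPU callbacks.  On SMP the wrappers declared above are expected
 * to fan the local flush out to every processor, roughly:
 *
 *	void flush_data_cache(void)
 *	{
 *		on_each_cpu(flush_data_cache_local, NULL, 1);
 *	}
 *
 * which is why the UP fallback simply calls the local flavour with a
 * NULL argument.
 */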

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_page(unsigned long);
void flush_user_icache_page(unsigned long);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))

/* vmap range flushes and invalidates.  Architecturally, we don't need
 * the invalidate, because the CPU should refuse to speculate once an
 * area has been flushed, so invalidate is left empty */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	flush_kernel_dcache_range_asm(start, start + size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
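
/*
 * Usage sketch (not part of the original header): a driver doing I/O on
 * a vmap()'d buffer is expected to write the kernel alias back before
 * the device reads the memory, and to call the (empty) invalidate hook
 * once the device has written it, along the lines of:
 *
 *	flush_kernel_vmap_range(vaddr, len);
 *	... start the transfer and wait for it to complete ...
 *	invalidate_kernel_vmap_range(vaddr, len);
 *
 * where vaddr and len stand in for the caller's mapping and length.
 */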

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma,page)	do {		\
	flush_kernel_dcache_page(page);			\
	flush_kernel_icache_page(page_address(page));	\
} while (0)

#define flush_icache_range(s,e)		do {		\
	flush_kernel_dcache_range_asm(s,e);		\
	flush_kernel_icache_range_asm(s,e);		\
} while (0)
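
/*
 * Note (not from the original header): the PA-RISC instruction cache
 * does not snoop the data cache, so freshly written code (module text,
 * ptrace breakpoints, etc.) has to be written back from the D-cache and
 * then purged from the I-cache before it is executed; that is why
 * flush_icache_range() and flush_icache_page() perform both operations.
 */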

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)
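
/*
 * Note (not from the original header): both helpers flush the user
 * mapping first so that the kernel alias used by memcpy() is coherent
 * with whatever the user has in its cache.  copy_to_user_page()
 * additionally writes the kernel alias back afterwards so the new data
 * reaches memory before the user mapping (or a later I-cache flush,
 * e.g. for a ptrace-inserted breakpoint) refetches it.
 */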

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		flush_user_dcache_page(vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#ifdef CONFIG_PA8X00
/* Only pa8800 and pa8900 need this */

#include <asm/kmap_types.h>

#define ARCH_HAS_KMAP

void kunmap_parisc(void *addr);

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page)			kunmap_parisc(page_address(page))
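
/*
 * Note (not from the original header): PA-RISC has no highmem, so kmap()
 * can simply return the page's linear-map address.  The kunmap hooks
 * exist so that kunmap_parisc() can flush the kernel alias on PA8800
 * and PA8900, whose caches are less forgiving of virtual aliasing than
 * earlier PA-RISC CPUs.
 */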

#define kmap_atomic(page, idx)		page_address(page)

#define kunmap_atomic(addr, idx)	kunmap_parisc(addr)

#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
#endif

#endif /* _PARISC_CACHEFLUSH_H */