/* xref: /linux/arch/parisc/include/asm/cacheflush.h (revision 210501aa570fdaa8b06e56fd1c04f31f2d3f368b) */
#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/uaccess.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Internal implementation */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void); /* flushes data-cache only (all processors) */
void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
#else
#define flush_data_cache() flush_data_cache_local(NULL)
#define flush_instruction_cache() flush_instruction_cache_local(NULL)
#endif
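
/*
 * Usage sketch (an assumption, not defined in this header): on SMP kernels
 * the global flushes are expected to broadcast the local routines to every
 * CPU, roughly
 *
 *	void flush_data_cache(void)
 *	{
 *		on_each_cpu(flush_data_cache_local, NULL, 1);
 *	}
 *
 * which is also why the _local variants take an (unused) void * argument:
 * it lets them be passed directly as on_each_cpu() callbacks.
 */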

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_page(unsigned long);
void flush_user_icache_page(unsigned long);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))

/* vmap range flushes and invalidates.  Architecturally, we don't need
 * the invalidate, because the CPU should refuse to speculate once an
 * area has been flushed, so invalidate is left empty */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	flush_kernel_dcache_range_asm(start, start + size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
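
/*
 * Illustrative sketch (an assumption, not part of this header): a driver
 * doing I/O to a vmap()ed buffer flushes the kernel mapping before the
 * transfer and invalidates it before reading data the device wrote, e.g.
 *
 *	void *buf = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *
 *	flush_kernel_vmap_range(buf, len);
 *	... device DMAs into the underlying pages ...
 *	invalidate_kernel_vmap_range(buf, len);
 *	... CPU reads the new data through buf ...
 *
 * On PA-RISC the invalidate side is empty for the reason given in the
 * comment above; 'pages', 'nr_pages' and 'len' are illustrative names.
 */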

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma,page)	do {		\
	flush_kernel_dcache_page(page);			\
	flush_kernel_icache_page(page_address(page));	\
} while (0)

#define flush_icache_range(s,e)		do {		\
	flush_kernel_dcache_range_asm(s,e);		\
	flush_kernel_icache_range_asm(s,e);		\
} while (0)
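
/*
 * Sketch of the intended use (an assumption, not defined here): after the
 * kernel writes instructions into memory (module loading, kprobes, ...) it
 * must make them visible to instruction fetch.  Because PA-RISC has
 * separate I- and D-caches, the macro above first writes the dirty data
 * lines back and then invalidates the stale instruction lines:
 *
 *	memcpy(code_buf, insns, insn_len);
 *	flush_icache_range((unsigned long)code_buf,
 *			   (unsigned long)code_buf + insn_len);
 *	... only now is it safe to execute from code_buf ...
 *
 * code_buf/insns/insn_len are illustrative names.
 */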

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)
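
/*
 * These are meant for access_process_vm()/ptrace-style accesses, where the
 * kernel touches another task's page through a kernel mapping.  A hedged
 * sketch of the pattern (names here are illustrative only):
 *
 *	// write 'len' bytes at user address 'vaddr' in the traced task
 *	void *kaddr = kmap(page);
 *	copy_to_user_page(vma, page, vaddr, kaddr + offset, buf, len);
 *	kunmap(page);
 *
 * The flush_cache_page() beforehand removes stale user-alias lines, and the
 * kernel d-cache flush afterwards pushes the new data out so the user
 * mapping of the page sees it.
 */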

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		flush_user_dcache_page(vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}
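
/*
 * Hedged usage note (not defined in this file): flush_kernel_dcache_page()
 * is what a driver calls after writing to a page cache page through the
 * kernel mapping (PIO block drivers, for instance), so that a user mapping
 * of the same page does not keep seeing stale data through its own cache
 * alias.  Illustrative sketch; io_buffer and len are assumed names:
 *
 *	void *kaddr = kmap_atomic(page, KM_USER0);
 *	memcpy(kaddr, io_buffer, len);
 *	kunmap_atomic(kaddr, KM_USER0);
 *	flush_kernel_dcache_page(page);
 */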

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#ifdef CONFIG_PA8X00
/* Only pa8800 and pa8900 need this */

#include <asm/kmap_types.h>

#define ARCH_HAS_KMAP

void kunmap_parisc(void *addr);

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page)			kunmap_parisc(page_address(page))

static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
	pagefault_disable();
	return page_address(page);
}

static inline void kunmap_atomic(void *addr, enum km_type idx)
{
	kunmap_parisc(addr);
	pagefault_enable();
}

#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)
#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
#endif /* CONFIG_PA8X00 */
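
/*
 * Hedged usage sketch: with CONFIG_PA8X00 the kmap_atomic()/kunmap_atomic()
 * pair above must still be balanced even though there is no highmem here,
 * since the unmap side goes through kunmap_parisc(), which (assumption
 * about the out-of-line implementation) handles the d-cache aliasing on
 * pa8800/pa8900.  A typical caller, with illustrative names:
 *
 *	void *vto = kmap_atomic(to_page, KM_USER1);
 *	copy_page(vto, vfrom);
 *	kunmap_atomic(vto, KM_USER1);
 */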

#endif /* _PARISC_CACHEFLUSH_H */