#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Internal implementation */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void); /* flushes data-cache only (all processors) */
void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
#else
#define flush_data_cache() flush_data_cache_local(NULL)
#define flush_instruction_cache() flush_instruction_cache_local(NULL)
#endif
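
/*
 * Illustrative sketch (not part of the original header): on SMP the
 * global variants above are expected to broadcast the local flush to
 * every CPU, roughly
 *
 *	void flush_data_cache(void)
 *	{
 *		on_each_cpu(flush_data_cache_local, NULL, 1);
 *	}
 *
 * which is why the UP build can simply alias them to the *_local calls.
 */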

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_page(unsigned long);
void flush_user_icache_page(unsigned long);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))
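
/*
 * Illustrative use (not part of the original header): a driver that has
 * written a buffer through its kernel mapping before handing it to a
 * non-coherent device might flush it with, assuming 'buf' and 'len' are
 * its own variables:
 *
 *	flush_kernel_dcache_range((unsigned long)buf, len);
 */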

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)
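
/*
 * Illustrative sketch (not part of the original header): flush_dcache_page()
 * is expected to take this lock while it walks the address_space looking
 * for user mappings that alias the kernel page, roughly:
 *
 *	flush_dcache_mmap_lock(mapping);
 *	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
 *		... flush the aliased user address ...
 *	}
 *	flush_dcache_mmap_unlock(mapping);
 */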

#define flush_icache_page(vma,page)	do { 		\
	flush_kernel_dcache_page(page);			\
	flush_kernel_icache_page(page_address(page)); 	\
} while (0)

#define flush_icache_range(s,e)		do { 		\
	flush_kernel_dcache_range_asm(s,e); 		\
	flush_kernel_icache_range_asm(s,e); 		\
} while (0)
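
/*
 * Illustrative use (not part of the original header): any code that writes
 * instructions through a kernel mapping (module loader, kprobes, ...) must
 * push them out of the D-cache and invalidate the I-cache before they are
 * executed, e.g. for a hypothetical buffer 'insns' of 'size' bytes:
 *
 *	memcpy(insns, image, size);
 *	flush_icache_range((unsigned long)insns, (unsigned long)insns + size);
 */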

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)
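
/*
 * Illustrative sketch (not part of the original header): these two are
 * meant for callers like access_process_vm()/ptrace, which poke another
 * task's pages through a kernel mapping, roughly:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *	set_page_dirty_lock(page);
 *	kunmap(page);
 */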

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		flush_user_dcache_page(vmaddr);
}
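
/*
 * Illustrative sketch (not part of the original header): anonymous pages
 * have no address_space, so flush_dcache_page() cannot find the user
 * alias; generic code such as get_user_pages() is therefore expected to
 * flush both sides before touching the page through its kernel alias,
 * roughly:
 *
 *	flush_anon_page(vma, page, user_addr);
 *	flush_dcache_page(page);
 *	... access page_address(page) ...
 */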

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}
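
/*
 * Illustrative use (not part of the original header): a driver that fills
 * a page cache page through a temporary kernel mapping is expected to
 * flush the kernel alias before user space can see the data, e.g.:
 *
 *	addr = kmap_atomic(page, KM_USER0);
 *	memcpy(addr, data, len);
 *	kunmap_atomic(addr, KM_USER0);
 *	flush_kernel_dcache_page(page);
 */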

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#ifdef CONFIG_PA8X00
/* Only pa8800, pa8900 need this */
#define ARCH_HAS_KMAP

void kunmap_parisc(void *addr);

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page)			kunmap_parisc(page_address(page))

#define kmap_atomic(page, idx)		page_address(page)

#define kunmap_atomic(addr, idx)	kunmap_parisc(addr)

#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
#endif
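
/*
 * Illustrative sketch (not part of the original header): kunmap_parisc()
 * is expected to flush the kernel mapping it is tearing down (PA8800/PA8900
 * caches are not coherent across virtual aliases), so callers keep the
 * usual highmem-style pairing even though PA-RISC has no highmem:
 *
 *	vto = kmap(page);
 *	memcpy(vto, vfrom, PAGE_SIZE);
 *	kunmap(page);
 */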

#endif /* _PARISC_CACHEFLUSH_H */