/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/mm.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1
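
/*
 * Illustrative sketch (not part of this header; the helper name is
 * hypothetical): the flag lets the arch code skip maintenance that has
 * already been done, so the D-cache clean / I-cache invalidation runs only
 * the first time a freshly written page is mapped into userspace:
 *
 *	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 *		sync_caches_for_page(page);	// hypothetical maintenance helper
 */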

/*
 *	MM Cache Management
 *	===================
 *
 *	These methods are implemented in arch/arm64/mm/cache.S.
 *
 *	Start addresses are inclusive and end addresses are exclusive; start
 *	addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information. Please note that
 *	the implementation assumes a non-aliasing VIPT D-cache and an
 *	(aliasing) VIPT or ASID-tagged VIVT I-cache.
 *
 *	flush_cache_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_icache_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_cache_user_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 */
extern void flush_cache_all(void);
extern void flush_cache_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
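
/*
 * Illustrative sketch (not part of this header): after the kernel writes
 * instructions to memory (module loading, code patching, etc.), the I-cache
 * must be brought in sync with the D-cache before the new instructions are
 * executed. install_insns() and its arguments are hypothetical:
 *
 *	static void install_insns(void *dst, const void *insns, size_t len)
 *	{
 *		memcpy(dst, insns, len);
 *		flush_icache_range((unsigned long)dst,
 *				   (unsigned long)dst + len);
 *		// it is now safe to branch to dst
 *	}
 */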

/*
 * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
 */
static inline void flush_cache_mm(struct mm_struct *mm)
{
}

static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long user_addr, unsigned long pfn)
{
}

/*
 * Cache maintenance functions used by the DMA API. Not to be used directly.
 */
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_range(const void *, const void *);
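
/*
 * Illustrative sketch (not part of this header): the arch DMA mapping code
 * brackets a streaming DMA transfer with these helpers; the final argument
 * is the DMA direction. vaddr and size below are hypothetical:
 *
 *	__dma_map_area(vaddr, size, DMA_TO_DEVICE);	// clean before device access
 *	// ... device reads the buffer ...
 *	__dma_unmap_area(vaddr, size, DMA_TO_DEVICE);
 */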

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
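
/*
 * Illustrative sketch (not part of this header): access_process_vm(), used
 * for example by ptrace(), is the typical caller. Writing into another
 * process's page must also keep the I-cache coherent, which is why
 * copy_to_user_page() is out of line while copy_from_user_page() is a plain
 * memcpy(). maddr/offset/buf/len stand for the usual arguments:
 *
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 */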

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
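
/*
 * Illustrative sketch (not part of this header): a filesystem or driver
 * that writes to a page cache page through the kernel mapping calls
 * flush_dcache_page() so that userspace mappings observe the new data.
 * data and len are hypothetical:
 *
 *	void *kaddr = kmap_atomic(page);
 *	memcpy(kaddr, data, len);
 *	kunmap_atomic(kaddr);
 *	flush_dcache_page(page);
 */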

static inline void __flush_icache_all(void)
{
	/* invalidate all I-caches to PoU, Inner Shareable domain */
	asm("ic	ialluis");
	/* ensure completion of the invalidation */
	dsb(ish);
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma, page)	do { } while (0)

/*
 * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}

int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
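
/*
 * Illustrative sketch (not part of this header): these helpers change the
 * attributes of whole kernel pages, e.g. to enforce RO/NX on module
 * mappings after relocation. region and nr_pages are hypothetical:
 *
 *	set_memory_ro((unsigned long)region, nr_pages);
 *	set_memory_nx((unsigned long)region, nr_pages);
 */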

#endif	/* __ASM_CACHEFLUSH_H */