/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

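/*
 * Write back and invalidate the D-cache for the page at @pfn through a
 * temporary kernel mapping placed at the same cache colour as the user
 * address @vaddr, so that the range operation below hits the lines the
 * user mapping occupies in an aliasing VIPT cache.
 */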
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	/*
	 * Clean and invalidate the D-cache over the alias window by
	 * address range (MCRR p15, c14), then drain the write buffer.
	 */
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}

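/*
 * As flush_pfn_alias(), but map the page at a colour-matched alias and
 * perform I/D coherency maintenance over just @len bytes, starting at
 * @vaddr's offset within the page.
 */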
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * Clean and invalidate the entire D-cache and drain the
		 * write buffer: with an aliasing VIPT cache we cannot
		 * tell which colours the mm's pages occupy.
		 */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * Clean and invalidate the entire D-cache and drain the
		 * write buffer, as in flush_cache_mm() above.
		 */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

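/*
 * Make data written through the kernel mapping @kaddr visible to user
 * space at @uaddr, and to instruction fetches where the VMA is
 * executable.  Called via copy_to_user_page() below.
 */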
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		/*
		 * If cache maintenance is not broadcast by the hardware,
		 * the other CPUs' I-caches must be flushed explicitly.
		 */
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
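
/*
 * A typical caller, sketched from access_process_vm() (the real
 * sequence lives in mm/memory.c, not in this file):
 *
 *	maddr = kmap(page);		// page from get_user_pages()
 *	copy_to_user_page(vma, page, addr,
 *			  maddr + (addr & ~PAGE_MASK), buf, bytes);
 *	set_page_dirty_lock(page);
 *	kunmap(page);
 */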

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		size_t page_size = PAGE_SIZE << compound_order(page);
		__cpuc_flush_dcache_area(page_address(page), page_size);
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(page + i);
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we need to do only one flush: at the userspace colour, which is
	 * derived from page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

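/*
 * Write back and invalidate every user-space alias of this page-cache
 * page in the current mm.  flush_dcache_page() below calls this only
 * for VIVT caches.
 */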
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
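/*
 * Keep the I-cache and D-cache coherent when a new user-space pte is
 * installed: set_pte_at() (arch/arm/include/asm/pgtable.h) calls this
 * for valid user ptes before setting the hardware entry.
 */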
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapped(page)) {
		clear_bit(PG_dcache_clean, &page->flags);
	} else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
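
/*
 * Usage sketch (illustrative; not a caller in this file): anything that
 * writes to a page cache page through its kernel mapping must call
 * flush_dcache_page() before the data can be safely read through a
 * (possibly differently-coloured) user mapping:
 *
 *	kaddr = kmap(page);
 *	memcpy(kaddr, data, len);
 *	kunmap(page);
 *	flush_dcache_page(page);
 */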

/*
 * Ensure cache coherency for the kernel mapping of this page. We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * kmap_atomic() doesn't set the page virtual
			 * address for highmem pages, and
			 * kunmap_atomic() takes care of cache
			 * flushing already.
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
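
/*
 * Usage sketch (illustrative): after modifying a pinned page through a
 * temporary kernel mapping, as the scatterlist iterator does:
 *
 *	addr = kmap_atomic(page);
 *	memcpy(addr + offset, buf, len);
 *	flush_kernel_dcache_page(page);
 *	kunmap_atomic(addr);
 */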

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
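
/*
 * The sequence above, made concrete (a sketch only; buf/len and the
 * get_user_pages() arguments are illustrative):
 *
 *	get_user_pages(tsk, mm, uaddr, 1, 1, 0, &page, &vma);
 *	flush_anon_page(vma, page, uaddr);	// reaches __flush_anon_page()
 *	kaddr = kmap(page);
 *	memcpy(kaddr, buf, len);		// write to the page
 *	flush_dcache_page(page);		// we wrote to the page
 *	kunmap(page);
 */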
380