/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 * Joe Taylor
 * Marc Gauthier
 *
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pgtable.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

/*
 * Note:
 * The kernel provides one architecture bit PG_arch_1 in the page flags that
 * can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate if the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We, therefore,
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So we use this flag to indicate a dirty
 * page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
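/*
 * If the user mapping at 'vaddr' has a different cache colour than the
 * kernel mapping of 'page', invalidate the kernel-coloured copy so that
 * no stale lines remain once the callers write the page through a
 * vaddr-coloured alias.
 */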
static inline void kmap_invalidate_coherent(struct page *page,
					    unsigned long vaddr)
{
	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		unsigned long kvaddr;

		if (!PageHighMem(page)) {
			kvaddr = (unsigned long)page_to_virt(page);

			__invalidate_dcache_page(kvaddr);
		} else {
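			/*
			 * Highmem pages have no permanent kernel mapping:
			 * invalidate them through a temporary alias in the
			 * TLBTEMP area with the cache colour of the page's
			 * physical address.  Preemption stays disabled
			 * while the temporary mapping is in use.
			 */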
			kvaddr = TLBTEMP_BASE_1 +
				(page_to_phys(page) & DCACHE_ALIAS_MASK);

			preempt_disable();
			__invalidate_dcache_page_alias(kvaddr,
						       page_to_phys(page));
			preempt_enable();
		}
	}
}

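/*
 * Return a kernel virtual address in the TLBTEMP window that has the same
 * cache colour as the user address 'vaddr', and report the page's physical
 * address through '*paddr'.
 */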
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
				    unsigned long vaddr, unsigned long *paddr)
{
	*paddr = page_to_phys(page);
	return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
}

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	struct folio *folio = page_folio(page);
	unsigned long paddr;
	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

	preempt_disable();
	kmap_invalidate_coherent(page, vaddr);
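	/*
	 * The page is written below through an alias with the user
	 * mapping's cache colour; mark it dirty (see the comment at the
	 * top of this file) so the pending lines get flushed later.
	 */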
	set_bit(PG_arch_1, folio_flags(folio, 0));
	clear_page_alias(kvaddr, paddr);
	preempt_enable();
}
EXPORT_SYMBOL(clear_user_highpage);

void copy_user_highpage(struct page *dst, struct page *src,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	struct folio *folio = page_folio(dst);
	unsigned long dst_paddr, src_paddr;
	void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
					  &dst_paddr);
	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
					  &src_paddr);

	preempt_disable();
	kmap_invalidate_coherent(dst, vaddr);
	set_bit(PG_arch_1, folio_flags(folio, 0));
	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * This routine is called any time the kernel writes to, or is about to
 * read from, a page cache page.
 */

void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping = folio_flush_mapping(folio);

	/*
	 * If we have a mapping but the folio is not mapped to user space
	 * yet, we simply mark it dirty and defer flushing the caches
	 * until update_mmu_cache_range().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &folio->flags))
			set_bit(PG_arch_1, &folio->flags);
		return;

	} else {
		unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
		unsigned long temp = folio_pos(folio);
		unsigned int i, nr = folio_nr_pages(folio);
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		preempt_disable();
		for (i = 0; i < nr; i++) {
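			/*
			 * The first alias below has the cache colour of
			 * the kernel mapping (derived from the physical
			 * address); the second has the colour of shared
			 * user mappings, which are aligned to the file
			 * offset returned by folio_pos().
			 */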
			virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
			__flush_invalidate_dcache_page_alias(virt, phys);

			virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

			if (alias)
				__flush_invalidate_dcache_page_alias(virt, phys);

			if (mapping)
				__invalidate_icache_page_alias(virt, phys);
			phys += PAGE_SIZE;
			temp += PAGE_SIZE;
		}
		preempt_enable();
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}
EXPORT_SYMBOL(flush_dcache_folio);

/*
 * For now, flush the whole cache. FIXME??
 */

void local_flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}
EXPORT_SYMBOL(local_flush_cache_range);

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit. */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	preempt_disable();
	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_cache_page);

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep, unsigned int nr)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct folio *folio;
	unsigned int i;

	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));

	/* Invalidate old entries in TLBs */
	for (i = 0; i < nr; i++)
		flush_tlb_page(vma, addr + i * PAGE_SIZE);
	nr = folio_nr_pages(folio);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

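	/*
	 * With D-cache aliasing, a set PG_arch_1 means the folio may still
	 * have dirty cache lines (see the comment at the top of this file):
	 * write them back through both the kernel- and the user-coloured
	 * alias and drop stale I-cache lines before the new user mapping
	 * is used.
	 */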
	if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) {
		unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
		unsigned long tmp;

		preempt_disable();
		for (i = 0; i < nr; i++) {
			tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
			__flush_invalidate_dcache_page_alias(tmp, phys);
			tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
			__flush_invalidate_dcache_page_alias(tmp, phys);
			__invalidate_icache_page_alias(tmp, phys);
			phys += PAGE_SIZE;
		}
		preempt_enable();

		clear_bit(PG_arch_1, &folio->flags);
	}
#else
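	/*
	 * Without aliasing, a set PG_arch_1 means I-cache and D-cache are
	 * already coherent for this folio ('clean').  Only folios that are
	 * not yet clean and are mapped executable need a write-back and an
	 * I-cache invalidate here.
	 */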
	if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		for (i = 0; i < nr; i++) {
			void *paddr = kmap_local_folio(folio, i * PAGE_SIZE);
			__flush_dcache_page((unsigned long)paddr);
			__invalidate_icache_page((unsigned long)paddr);
			kunmap_local(paddr);
		}
		set_bit(PG_arch_1, &folio->flags);
	}
#endif
}

/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */
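	/*
	 * The user may have dirtied the page through its own, differently
	 * coloured mapping; write those lines back and invalidate them so
	 * they cannot shadow the data copied below.
	 */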

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		preempt_disable();
		__flush_invalidate_dcache_page_alias(t, phys);
		preempt_enable();
	}

	/* Copy data */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		preempt_disable();
		__flush_invalidate_dcache_range((unsigned long) dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);
		preempt_enable();

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long) dst, len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient.)
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		preempt_disable();
		__flush_invalidate_dcache_page_alias(t, phys);
		preempt_enable();
	}

	memcpy(dst, src, len);
}

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */