/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000

/*
 * Clean and invalidate a page of data through a temporary kernel
 * mapping that has the same cache colour as the given user virtual
 * address, so the operation hits the same D-cache lines as the user
 * mapping does.
 */
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
        unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        const int zero = 0;

        set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
        flush_tlb_kernel_page(to);

        /* clean+invalidate the D-cache range, then drain the write buffer */
        asm("mcrr	p15, 0, %1, %0, c14\n"
            "mcr	p15, 0, %2, c7, c10, 4"
            :
            : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
            : "cc");
}

/*
 * Invalidate the I-cache for this page through a temporary kernel
 * mapping congruent with the user mapping.
 */
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
        unsigned long colour = CACHE_COLOUR(vaddr);
        unsigned long offset = vaddr & (PAGE_SIZE - 1);
        unsigned long to;

        set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
        to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
        flush_tlb_kernel_page(to);
        flush_icache_range(to, to + len);
}

void flush_cache_mm(struct mm_struct *mm)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_mm(mm);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                /* clean+invalidate the entire D-cache, then drain the write buffer */
                asm("mcr	p15, 0, %0, c7, c14, 0\n"
                    "mcr	p15, 0, %0, c7, c10, 4"
                    :
                    : "r" (0)
                    : "cc");
        }
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_range(vma, start, end);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                /* clean+invalidate the entire D-cache, then drain the write buffer */
                asm("mcr	p15, 0, %0, c7, c14, 0\n"
                    "mcr	p15, 0, %0, c7, c10, 4"
                    :
                    : "r" (0)
                    : "cc");
        }

        if (vma->vm_flags & VM_EXEC)
                __flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_page(vma, user_addr, pfn);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(pfn, user_addr);
                __flush_icache_all();
        }

        if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
                __flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif
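
/*
 * Editor's note - a worked example of the colour arithmetic above,
 * assuming a common VIPT configuration (the numbers are illustrative,
 * not taken from this file): a 32KiB, 4-way set-associative D-cache
 * with 4KiB pages has 8KiB per way, so virtual address bit 12 helps
 * index the cache but is not part of the page offset.  CACHE_COLOUR()
 * extracts such bits; for vaddr == 0x00401000 the colour is 1, and
 * flush_pfn_alias() places its temporary mapping at
 *
 *	ALIAS_FLUSH_START + (1 << PAGE_SHIFT) == 0xffff5000
 *
 * which indexes the same cache sets as the user mapping, so cleaning
 * the alias also cleans any lines the user mapping left dirty.
 */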

static void flush_ptrace_access_other(void *args)
{
        __flush_icache_all();
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                         unsigned long uaddr, void *kaddr, unsigned long len)
{
        if (cache_is_vivt()) {
                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                        unsigned long addr = (unsigned long)kaddr;
                        __cpuc_coherent_kern_range(addr, addr + len);
                }
                return;
        }

        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(page_to_pfn(page), uaddr);
                __flush_icache_all();
                return;
        }

        /* VIPT non-aliasing D-cache */
        if (vma->vm_flags & VM_EXEC) {
                unsigned long addr = (unsigned long)kaddr;
                if (icache_is_vipt_aliasing())
                        flush_icache_alias(page_to_pfn(page), uaddr, len);
                else
                        __cpuc_coherent_kern_range(addr, addr + len);
                if (cache_ops_need_broadcast())
                        smp_call_function(flush_ptrace_access_other,
                                          NULL, 1);
        }
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long uaddr, void *dst, const void *src,
                       unsigned long len)
{
#ifdef CONFIG_SMP
        preempt_disable();
#endif
        memcpy(dst, src, len);
        flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
        preempt_enable();
#endif
}
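
/*
 * Editor's sketch of the expected caller sequence (illustrative only,
 * modelled on the generic access_process_vm()/ptrace path; the calls
 * around copy_to_user_page() are assumptions about the caller, not
 * definitions made in this file):
 *
 *	get_user_pages()		pin the target page
 *	kmap(page)			kernel mapping for the page
 *	copy_to_user_page()		memcpy() + flush_ptrace_access()
 *	set_page_dirty_lock(page)
 *	kunmap(page)
 *
 * Disabling preemption around the copy and the flush keeps both on one
 * CPU, which the VIVT branch of flush_ptrace_access() relies upon when
 * it tests mm_cpumask() against smp_processor_id().
 */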

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
        if (!PageHighMem(page)) {
                __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
        } else {
                void *addr = kmap_high_get(page);
                if (addr) {
                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                        kunmap_high(page);
                } else if (cache_is_vipt()) {
                        /* unmapped pages might still be cached */
                        addr = kmap_atomic(page);
                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                        kunmap_atomic(addr);
                }
        }

        /*
         * If this is a page cache page, and we have an aliasing VIPT cache,
         * we only need to do one flush - at the relevant userspace colour,
         * which is congruent with page->index.
         */
        if (mapping && cache_is_vipt_aliasing())
                flush_pfn_alias(page_to_pfn(page),
                                page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *mpnt;
        struct prio_tree_iter iter;
        pgoff_t pgoff;

        /*
         * There are possible user space mappings of this page:
         * - VIVT cache: we also need to write back and invalidate all user
         *   data in the current VM view associated with this page.
         * - aliasing VIPT: we only need to find one mapping of this page.
         */
        pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        flush_dcache_mmap_lock(mapping);
        vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long offset;

                /*
                 * If this VMA is not in our MM, we can ignore it.
                 */
                if (mpnt->vm_mm != mm)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
        }
        flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
        unsigned long pfn;
        struct page *page;
        struct address_space *mapping;

        if (!pte_present_user(pteval))
                return;
        if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
                /* only flush non-aliasing VIPT caches for exec mappings */
                return;
        pfn = pte_pfn(pteval);
        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        if (cache_is_vipt_aliasing())
                mapping = page_mapping(page);
        else
                mapping = NULL;

        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(mapping, page);

        if (pte_exec(pteval))
                __flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent, so nothing is required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: we need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        if (page == ZERO_PAGE(0))
                return;

        mapping = page_mapping(page);

        if (!cache_ops_need_broadcast() &&
            mapping && !mapping_mapped(mapping))
                clear_bit(PG_dcache_clean, &page->flags);
        else {
                __flush_dcache_page(mapping, page);
                if (mapping && cache_is_vivt())
                        __flush_dcache_aliases(mapping, page);
                else if (mapping)
                        __flush_icache_all();
                set_bit(PG_dcache_clean, &page->flags);
        }
}
EXPORT_SYMBOL(flush_dcache_page);
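
/*
 * Editor's sketch of the lazy-flush handshake described above
 * (illustrative; on this kernel generation __sync_icache_dcache() is
 * invoked from the ARM set_pte_at() implementation - that caller is an
 * assumption noted here, not something defined in this file):
 *
 *	flush_dcache_page(page)
 *	    no user mappings yet -> clear_bit(PG_dcache_clean)
 *	    (the flush is deferred; the kernel lines may stay dirty)
 *
 *	...the page later gets mapped into userspace...
 *
 *	set_pte_at()
 *	    -> __sync_icache_dcache(pteval)
 *	        test_and_set_bit(PG_dcache_clean) finds the bit clear,
 *	        so the deferred __flush_dcache_page() runs now
 */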

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
        unsigned long pfn;

        /* VIPT non-aliasing caches need do nothing */
        if (cache_is_vipt_nonaliasing())
                return;

        /*
         * Write back and invalidate userspace mapping.
         */
        pfn = page_to_pfn(page);
        if (cache_is_vivt()) {
                flush_cache_page(vma, vmaddr, pfn);
        } else {
                /*
                 * For aliasing VIPT, we can flush an alias of the
                 * userspace address only.
                 */
                flush_pfn_alias(pfn, vmaddr);
                __flush_icache_all();
        }

        /*
         * Invalidate kernel mapping.  No data should be contained
         * in this mapping of the page.  FIXME: this is overkill
         * since we actually ask for a write-back and invalidate.
         */
        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}