/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

	/*
	 * Clean and invalidate the D-cache lines covering the alias,
	 * then drain the write buffer.
	 */
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/* clean and invalidate the entire D-cache, then drain the write buffer */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/* clean and invalidate the entire D-cache, then drain the write buffer */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif

#ifdef CONFIG_SMP
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}
#endif

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
#ifdef CONFIG_SMP
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
#endif
	}
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
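 * On SMP, copy_to_user_page() below disables preemption around the
 * memcpy() and the flush so that the cache maintenance runs on the
 * same CPU that wrote the data.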
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	void *addr = page_address(page);

	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
#ifdef CONFIG_HIGHMEM
	/*
	 * kmap_atomic() doesn't set the page virtual address, and
	 * kunmap_atomic() takes care of cache flushing already.
	 */
	if (addr)
#endif
		__cpuc_flush_dcache_area(addr, PAGE_SIZE);

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - at the relevant userspace colour,
	 * which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

#ifndef CONFIG_SMP
	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}