/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000

/*
 * Clean and invalidate the page at @pfn via a temporary kernel-space
 * alias that shares its cache colour with the user address @vaddr, so
 * that an aliasing VIPT D-cache is flushed at the index the userspace
 * mapping actually uses.
 */
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

	/* clean+invalidate the alias range, then drain the write buffer */
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/* clean+invalidate the entire D-cache, then drain the write buffer */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif

#ifdef CONFIG_SMP
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}
#endif

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
#ifdef CONFIG_SMP
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
#endif
	}
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
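/*
 * Illustrative note: the typical caller of copy_to_user_page() is the
 * generic ptrace path (access_process_vm() in mm/memory.c), roughly:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	kunmap(page);
 *
 * The flush_ptrace_access() call above is what makes data written this
 * way (e.g. inserted breakpoint instructions) visible to the traced
 * task's view of the page through its own mapping and I-cache.
 */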
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	} else {
		void *addr = kmap_high_get(page);
		if (addr) {
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high(page);
		} else if (cache_is_vipt()) {
			pte_t saved_pte;
			addr = kmap_high_l1_vipt(page, &saved_pte);
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high_l1_vipt(page, saved_pte);
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

#ifndef CONFIG_SMP
	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
	}
}
EXPORT_SYMBOL(flush_dcache_page);
/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
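/*
 * Illustrative sketch (hypothetical caller) of the sequence described
 * in the comment above __flush_anon_page():
 *
 *	get_user_pages(...)			pin the anonymous page
 *	  flush_anon_page(vma, page, vmaddr)	reaches __flush_anon_page()
 *						for PageAnon() pages
 *	memcpy() to/from the page via its kernel mapping (e.g. kmap())
 *	flush_dcache_page(page)			if the page was written to
 *
 * flush_anon_page() itself is the inline wrapper in asm/cacheflush.h,
 * provided because ARCH_HAS_FLUSH_ANON_PAGE is defined on ARM.
 */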