/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	/*
	 * Map the page at a kernel virtual address with the same cache
	 * colour as the user address, so that flushing this alias also
	 * hits the cache lines of the user mapping.
	 */
	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

	/*
	 * Clean and invalidate the D-cache over the alias (the mcrr c14
	 * range operation), drain the write buffer, then invalidate the
	 * I-cache.
	 */
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4\n"
	"	mcr	p15, 0, %2, c7, c5, 0\n"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			__cpuc_flush_user_all();
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * Clean+invalidate the entire D-cache, invalidate the
		 * I-cache and drain the write buffer.
		 */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c5, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
						vma->vm_flags);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/* As in flush_cache_mm(): flush everything. */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c5, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
			unsigned long addr = user_addr & PAGE_MASK;
			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
		}
		return;
	}

	if (cache_is_vipt_aliasing())
		flush_pfn_alias(pfn, user_addr);
}

void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		return;
	}

	/* VIPT non-aliasing cache */
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
	    vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		/* only flushing the kernel mapping on non-aliasing VIPT */
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Write back any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
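	 * (page_address() below returns the page's kernel virtual
	 * address, i.e. the lowmem linear mapping, which is the alias
	 * this code handles.)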
	 */
	__cpuc_flush_dcache_page(page_address(page));

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush, at the relevant userspace colour,
	 * which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

#ifndef CONFIG_SMP
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
	}
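
	/*
	 * At this point any dirty data has reached RAM via the user-side
	 * flush above, so only stale lines can remain in the kernel-side
	 * alias; invalidating it ensures that later kernel accesses
	 * (e.g. the memcpy() in the sequence above) see the user's data.
	 */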
	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_page(page_address(page));
}
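
/*
 * Note: callers normally reach __flush_anon_page() through the
 * flush_anon_page() wrapper in <asm/cacheflush.h>, which only calls
 * in here for anonymous pages (PageAnon()).
 */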