/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_CPU_CACHE_VIPT
#define ALIAS_FLUSH_START	0xffff4000

#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)

/*
 * Map the physical page 'pfn' at a temporary kernel address with the
 * same cache colour as 'vaddr', then flush the caches through that alias.
 */
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);

	set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_kernel_page(to);

	/*
	 * Clean and invalidate the D-cache range (c14) and invalidate the
	 * I-cache range (c5) covering the aliased mapping.
	 */
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcrr	p15, 0, %1, %0, c5\n"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
	    : "cc");
}
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif

static void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	__cpuc_flush_dcache_page(page_address(page));

	/*
	 * If there's no mapping pointer here, then this page isn't
	 * visible to userspace yet, so there are no cache lines
	 * associated with any other aliases.
	 */
	if (!mapping)
		return;

	/*
	 * This is a page cache page.  If we have a VIPT cache, we
	 * only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (cache_is_vipt()) {
		if (cache_is_vipt_aliasing())
			flush_pfn_alias(page_to_pfn(page),
					page->index << PAGE_CACHE_SHIFT);
		return;
	}

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *    current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (cache_is_vipt_nonaliasing())
		return;

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
		__flush_dcache_page(mapping, page);
}
EXPORT_SYMBOL(flush_dcache_page);
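
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pattern for the exported flush_dcache_page() above.  A driver or
 * filesystem that writes to a page cache page through the kernel mapping
 * is expected to call flush_dcache_page() afterwards so that any user
 * space alias of the page sees the new data.  The function name below is
 * hypothetical, and kmap()/kunmap() are assumed to be available via
 * <linux/highmem.h>; the example is kept out of the build with #if 0.
 */
#if 0	/* example only, not compiled */
static void example_fill_page(struct page *page)
{
	void *addr = kmap(page);	/* kernel-space alias of the page */

	memset(addr, 0, PAGE_SIZE);	/* modify the page via the kernel alias */
	kunmap(page);

	/* make the kernel-alias cache lines visible to user mappings */
	flush_dcache_page(page);
}
#endif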