/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/mm.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	arch/arm64/mm/cache.S implements these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive; start
 *	addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information. Please note that
 *	the implementation assumes non-aliasing VIPT D-cache and (aliasing)
 *	VIPT or ASID-tagged VIVT I-cache.
 *
 *	flush_cache_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_icache_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_cache_user_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 */
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
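/*
 * Illustrative sketch (not part of the original header): the typical
 * pattern for a caller that has just written executable instructions
 * into a kernel buffer. The names 'code', 'insns' and 'len' are
 * hypothetical; the single flush_icache_range() call cleans the D-cache
 * to the point of unification and invalidates the I-cache over the
 * range, so the new instructions become visible to instruction fetch:
 *
 *	memcpy(code, insns, len);
 *	flush_icache_range((unsigned long)code,
 *			   (unsigned long)code + len);
 */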
static inline void flush_cache_mm(struct mm_struct *mm)
{
}

static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long user_addr, unsigned long pfn)
{
}

/*
 * Cache maintenance functions used by the DMA API. Not to be used directly.
 */
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_range(const void *, const void *);

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space. Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space. This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

static inline void __flush_icache_all(void)
{
	asm("ic	ialluis");
	dsb(ish);
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

/*
 * We don't appear to need to do anything here. In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma, page)	do { } while (0)

/*
 * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}

int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#endif