/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

/* Keep includes the same across arches.  */
#include <linux/mm.h>
#include <asm/cpu-features.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flushes a range of instructions
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
 *
 * MIPS specific flush operations:
 *
 *  - flush_cache_sigtramp() flushes the signal trampoline
 *  - flush_icache_all() flushes the entire instruction cache
 *  - flush_data_cache_page() flushes a page from the data cache
 */
extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
#define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)
extern void (*flush_cache_range)(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
extern void __flush_dcache_page(struct page *page);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
		__flush_dcache_page(page);
}

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vmaddr)
{
	if (cpu_has_dc_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}

static inline void flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
}

extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);

extern void (*__flush_cache_vmap)(void);

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vmap();
}

extern void (*__flush_cache_vunmap)(void);

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vunmap();
}

extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);
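/*
 * Illustrative sketch (not part of the original header): because the
 * MIPS I-cache is not necessarily coherent with the D-cache, code that
 * writes instructions into memory (a module loader, a JIT, a breakpoint
 * patcher) must write back the D-cache and invalidate the I-cache for
 * that range before executing it.  "dst" and "insns" below are
 * hypothetical names for a kernel buffer and the instruction bytes
 * being installed:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */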
/*
 * This flag is used to indicate that the page pointed to by a pte is
 * dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty			PG_arch_1

#define Page_dcache_dirty(page)		\
	test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)	\
	set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)	\
	clear_bit(PG_dcache_dirty, &(page)->flags)

/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);

extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
extern void *kmap_noncoherent(struct page *page, unsigned long addr);

static inline void kunmap_noncoherent(void)
{
	kunmap_coherent();
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
}

/*
 * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
 * cache writeback and invalidate operation.
 */
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

#endif /* _ASM_CACHEFLUSH_H */
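/*
 * Illustrative sketch (not part of the original header): the
 * PG_dcache_dirty bookkeeping above is consumed by the MIPS mm code
 * along these lines -- writeback of a page dirtied through a kernel
 * alias is deferred until the page is actually mapped into user space,
 * roughly:
 *
 *	if (page_mapping(page) && Page_dcache_dirty(page)) {
 *		unsigned long kaddr = (unsigned long) page_address(page);
 *
 *		if (exec || pages_do_alias(kaddr, address & PAGE_MASK))
 *			flush_data_cache_page(kaddr);
 *		ClearPageDcacheDirty(page);
 *	}
 *
 * "exec", "address" and "page" stand in for the pte update context;
 * treat this as a sketch of how __update_cache() uses the flag, not a
 * verbatim copy of it.
 */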