/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>
#include <asm/cpu-features.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flushes a range of instructions
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
 *
 * MIPS specific flush operations:
 *
 *  - flush_icache_all() flushes the entire instruction cache
 *  - flush_data_cache_page() flushes a page from the data cache
 *  - __flush_icache_user_range(start, end) flushes a range of user instructions
 */

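/*
 * Example (illustrative sketch, not part of this interface): MIPS
 * I-caches do not generally snoop D-cache stores, so code that writes
 * instructions must flush the affected range before executing it.
 * The helper name patch_insn() below is hypothetical.
 *
 *	static void patch_insn(u32 *addr, u32 insn)
 *	{
 *		*addr = insn;
 *		flush_icache_range((unsigned long)addr,
 *				   (unsigned long)(addr + 1));
 *	}
 */
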
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty			PG_arch_1

#define folio_test_dcache_dirty(folio)		\
	test_bit(PG_dcache_dirty, &(folio)->flags.f)
#define folio_set_dcache_dirty(folio)	\
	set_bit(PG_dcache_dirty, &(folio)->flags.f)
#define folio_clear_dcache_dirty(folio)	\
	clear_bit(PG_dcache_dirty, &(folio)->flags.f)

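/*
 * Sketch of the deferred-flush pattern this bit enables (a simplified
 * illustration, not the exact arch code): when a folio marked dirty is
 * finally mapped into userspace, write it back and clear the flag.
 *
 *	if (folio_test_dcache_dirty(folio)) {
 *		__flush_dcache_folio_pages(folio, folio_page(folio, 0),
 *					   folio_nr_pages(folio));
 *		folio_clear_dcache_dirty(folio);
 *	}
 */
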
extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
#define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)
extern void (*flush_cache_range)(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
void __flush_dcache_folio_pages(struct folio *folio, struct page *page, unsigned int nr);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_folio(struct folio *folio)
{
	if (cpu_has_dc_aliases)
		__flush_dcache_folio_pages(folio, folio_page(folio, 0),
					   folio_nr_pages(folio));
	else if (!cpu_has_ic_fills_f_dc)
		folio_set_dcache_dirty(folio);
}
#define flush_dcache_folio flush_dcache_folio

static inline void flush_dcache_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (cpu_has_dc_aliases)
		__flush_dcache_folio_pages(folio, page, 1);
	else if (!cpu_has_ic_fills_f_dc)
		folio_set_dcache_dirty(folio);
}

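/*
 * Example (illustrative, not part of this header): a typical caller
 * writes to a page-cache page through its kernel mapping and then
 * flushes so user mappings see the update.  src and len are
 * placeholders.
 *
 *	void *kaddr = kmap_local_page(page);
 *	memcpy(kaddr, src, len);
 *	kunmap_local(kaddr);
 *	flush_dcache_page(page);
 */
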
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vmaddr)
{
	if (cpu_has_dc_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}

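/*
 * Sketch (for illustration only): flush_anon_page() is for code such
 * as get_user_pages() callers that touch an anonymous page through a
 * kernel alias while userspace may hold a dirty alias at vmaddr.
 *
 *	flush_anon_page(vma, page, vmaddr);
 *	kaddr = kmap_local_page(page);
 *	... read the data userspace wrote at vmaddr ...
 *	kunmap_local(kaddr);
 */
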
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
extern void (*__flush_icache_user_range)(unsigned long start,
	unsigned long end);
extern void (*__local_flush_icache_user_range)(unsigned long start,
	unsigned long end);

extern void (*__flush_cache_vmap)(void);

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vmap();
}

#define flush_cache_vmap_early(start, end)	do { } while (0)

extern void (*__flush_cache_vunmap)(void);

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vunmap();
}

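/*
 * Note (illustrative): these two hooks are invoked by the core vmalloc
 * code, not by drivers.  A driver simply uses vmap()/vunmap() and the
 * flushes happen underneath:
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	...			(core mm ran flush_cache_vmap() for us)
 *	vunmap(va);		(flush_cache_vunmap() runs on teardown)
 */
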
extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

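/*
 * Example (simplified from what generic remote-access code does; the
 * variable names are placeholders): ptrace-style writes into another
 * process go through copy_to_user_page() so the I- and D-caches stay
 * coherent with the target's view of the page.
 *
 *	maddr = kmap_local_page(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	kunmap_local(maddr);
 *	set_page_dirty_lock(page);
 */
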
extern void (*flush_icache_all)(void);
extern void (*flush_data_cache_page)(unsigned long addr);

/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);

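/*
 * Example (hypothetical probe, for illustration): run_uncached() calls
 * func through an uncached alias so the probe's own execution does not
 * perturb the caches being measured.
 *
 *	static unsigned long probe_scache_size(void)
 *	{
 *		... issue CACHE index ops, return the detected size ...
 *	}
 *
 *	size = run_uncached(probe_scache_size);
 */
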
extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
extern void *kmap_noncoherent(struct page *page, unsigned long addr);

static inline void kunmap_noncoherent(void)
{
	kunmap_coherent();
}

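/*
 * Sketch (shortened from the pattern used by the MIPS user-page copy
 * code): kmap_coherent() maps the page at a kernel address with the
 * same virtual colour as the user mapping at vaddr, so the copy cannot
 * create a dirty cache alias.
 *
 *	void *vto = kmap_coherent(to, vaddr);
 *	copy_page(vto, vfrom);
 *	kunmap_coherent();
 */
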
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
/*
 * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
 * cache writeback and invalidate operation.
 */
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

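/*
 * Usage sketch (illustrative; buf and len are placeholders): callers
 * doing I/O to a buffer that is also mapped through vmap() bracket the
 * device access with these helpers.
 *
 *	flush_kernel_vmap_range(buf, len);	   write back dirty CPU data
 *	... device reads or writes the pages ...
 *	invalidate_kernel_vmap_range(buf, len);	   drop stale CPU lines
 */
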
#endif /* _ASM_CACHEFLUSH_H */