xref: /linux/arch/x86/include/asm/tlbflush.h (revision 442f04c34a1a467759d024a1d2c1df0f744dcb06)
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

struct tlb_state {
#ifdef CONFIG_SMP
	struct mm_struct *active_mm;	/* mm whose page tables this CPU uses */
	int state;			/* TLBSTATE_OK, TLBSTATE_LAZY or 0 (see below) */
#endif

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set the given bits in this CPU's CR4 (and in its shadow). */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear the given bits in this CPU's CR4 (and in its shadow). */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

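/*
 * Illustrative usage sketch (not part of the original header): how a
 * caller is expected to use the shadow-based helpers above. X86_CR4_TSD
 * is just an example bit; the #if 0 keeps the sketch out of the build.
 */
#if 0
static void example_disable_user_rdtsc(void)
{
	/* Sets CR4.TSD in both the shadow and the hardware register. */
	cr4_set_bits(X86_CR4_TSD);

	/* Reads are served from the shadow; no slow CR4 access needed. */
	WARN_ON(!(cr4_read_shadow() & X86_CR4_TSD));

	cr4_clear_bits(X86_CR4_TSD);
}
#endif
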
/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. This should only be used
 * during boot on the boot CPU.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

/* Set CR4 bits and record them for secondary CPUs and the trampoline. */
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

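/*
 * Illustrative sketch (not from the original file): during early boot,
 * feature setup might enable a CR4 bit on the boot CPU while also
 * recording it for CPUs that come up later. X86_CR4_PSE is used here
 * purely as an example bit; the #if 0 keeps the sketch out of the build.
 */
#if 0
static void example_enable_pse_at_boot(void)
{
	/* Updates mmu_cr4_features and the trampoline copy, then CR4. */
	cr4_set_bits_and_update_boot(X86_CR4_PSE);
}
#endif
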
/* Reload CR3, which flushes all non-global TLB entries on this CPU. */
static inline void __native_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}

/*
 * Toggling CR4.PGE off and back on flushes the entire TLB, including
 * global pages. The caller must have interrupts disabled.
 */
static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

/* INVLPG invalidates the TLB entry (global or not) for one page. */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

/* Flush everything: use the global variant when global pages may exist. */
static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

/* Passed as "end" to request a full flush instead of a ranged one. */
#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
 *
 * ...but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 *
 * A usage sketch follows below.
 */

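/*
 * Usage sketch (illustrative only, not part of the original header):
 * typical call sites for the interfaces listed above. The #if 0 keeps
 * the sketch out of the build.
 */
#if 0
static void example_flush_after_pte_change(struct vm_area_struct *vma,
					   unsigned long addr)
{
	/* After modifying the PTE for one user page: */
	flush_tlb_page(vma, addr);
}

static void example_flush_after_kernel_remap(unsigned long start,
					     unsigned long end)
{
	/* After changing kernel mappings (e.g. a vmalloc-style unmap): */
	flush_tlb_kernel_range(start, end);
}
#endif
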
#ifndef CONFIG_SMP

/* "_up" is for UniProcessor.
 *
 * This is a helper for other header functions.  *Not* intended to be called
 * directly.  All global TLB flushes need to either call this, or to bump the
 * vm statistics themselves.
 */
static inline void __flush_tlb_up(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
}

static inline void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();
}

static inline void flush_tlb(void)
{
	__flush_tlb_up();
}

static inline void local_flush_tlb(void)
{
	__flush_tlb_up();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_mm_range(struct mm_struct *mm,
	   unsigned long start, unsigned long end, unsigned long vmflag)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#else  /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define flush_tlb()	flush_tlb_current_task()

void native_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm,
				unsigned long start, unsigned long end);

#define TLBSTATE_OK	1	/* CPU is actively using active_mm */
#define TLBSTATE_LAZY	2	/* kernel thread is borrowing active_mm */

static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

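/*
 * Paraphrased sketch (not from this header, names simplified) of how the
 * remote-flush IPI handler in arch/x86/mm/tlb.c consumes this state: a
 * CPU in TLBSTATE_LAZY skips the flush and drops the stale mm instead.
 * The #if 0 keeps the sketch out of the build.
 */
#if 0
static void example_flush_ipi_handler(struct mm_struct *flush_mm)
{
	if (flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		local_flush_tlb();		/* really using the mm: flush */
	else
		leave_mm(smp_processor_id());	/* lazy: stop using the mm */
}
#endif
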
#endif	/* SMP */

/* Not inlined due to inc_irq_stat not being defined yet */
#define flush_tlb_local() {		\
	inc_irq_stat(irq_tlb_count);	\
	local_flush_tlb();		\
}

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end)	\
	native_flush_tlb_others(mask, mm, start, end)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */