/* xref: /linux/arch/x86/mm/tlb.c (revision e73173dbe55e5b4c2306728aad50c8e42194f6d5) */
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state, use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector, the target CPUs look into
 *	the right array slot for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us into this
 *	right now. In the future, when interrupts are split into per-CPU
 *	domains, this could be fixed, at the cost of triggering multiple
 *	IPIs in some cases.
 */

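/*
 * Illustrative sketch (added commentary, not part of the original
 * file): how the hashing described above maps a flushing CPU to a
 * vector slot. This mirrors the logic in flush_tlb_others_ipi()
 * below:
 *
 *	unsigned int sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
 *	union smp_flush_state *f = &flush_state[sender];
 *	...fill in f, then...
 *	apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
 *			    INVALIDATE_TLB_VECTOR_START + sender);
 *
 * With, say, 16 online CPUs, CPUs 3 and 11 both hash to slot 3 and
 * therefore serialize on that slot's tlbstate_lock.
 */
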
union smp_flush_state {
	struct {
		struct mm_struct *flush_mm;
		unsigned long flush_va;
		spinlock_t tlbstate_lock;
		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
	};
	char pad[CONFIG_X86_INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;

/*
 * State is put into the per CPU data section, but padded to a full
 * cache line because other CPUs can access it and we don't want
 * false sharing in the per cpu data segment.
 */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

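/*
 * Added note (a sketch, not in the original file): the pad member
 * plus the ____cacheline_internodealigned_in_smp attribute keep each
 * array slot on its own internode cache line, so concurrent senders
 * never false-share a line. A compile-time check along these lines
 * would express the assumption:
 *
 *	BUILD_BUG_ON(sizeof(union smp_flush_state) %
 *		     CONFIG_X86_INTERNODE_CACHE_BYTES);
 */
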
/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead we update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);

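/*
 * Example call site (added commentary): do_flush_tlb_all() below does
 *
 *	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
 *		leave_mm(cpu);
 *
 * so a CPU that is only lazily borrowing an mm drops it, clears its
 * bit in cpu_vm_mask, and stops receiving flush IPIs for that mm
 * instead of flushing over and over.
 */
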
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu_tlbstate.state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu_tlbstate.state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb
 *	(see the sketch after this comment).
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate.state is local to each cpu, so
 * there are no write/read ordering problems.
 */

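/*
 * Sketch of the 1b2/1b3 path above (added commentary; an assumed
 * shape for the switch_mm() side, not a quote of that code):
 *
 *	percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 *	if (!cpu_test_and_set(cpu, mm->cpu_vm_mask))
 *		load_cr3(mm->pgd);
 *
 * If the bit was already clear, leave_mm() ran while this cpu was
 * lazy, so stale user-space entries may remain and reloading cr3
 * performs the required flush.
 */
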
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

/*
 * FIXME: the use of asmlinkage is not consistent.  On x86_64 it is a
 * noop, kept only for documentation purposes, but even that usage is
 * slightly inconsistent.  On x86_32, asmlinkage means regparm(0), yet
 * the interrupt entry code calls in with the first parameter in %eax.
 * Maybe define intrlinkage?
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_ax contains the bitwise complement of the interrupt
	 * vector. Use that to determine where the sender put the data.
	 */
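	/*
	 * Worked example (added commentary): for the IPI raised on
	 * vector INVALIDATE_TLB_VECTOR_START + 3, orig_ax holds
	 * ~vector, so ~regs->orig_ax recovers the vector and sender
	 * evaluates to 3.
	 */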
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * it's staying as a return
	 *
	 * BUG();
	 */

	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else {
			leave_mm(cpu);
		}
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}

static void flush_tlb_others_ipi(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long va)
{
	unsigned int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &flush_state[sender];

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
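	/*
	 * Sketch of that optimization (added commentary, not
	 * implemented here): with at most NUM_INVALIDATE_TLB_VECTORS
	 * online CPUs every sender owns a private slot, so the lock
	 * could be taken conditionally:
	 *
	 *	if (num_online_cpus() > NUM_INVALIDATE_TLB_VECTORS)
	 *		spin_lock(&f->tlbstate_lock);
	 */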
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpumask_andnot(to_cpumask(f->flush_cpumask),
		       cpumask, cpumask_of(smp_processor_id()));

	/* Send the IPI only to the CPUs that are actually affected. */
	apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
		      INVALIDATE_TLB_VECTOR_START + sender);

	/* Wait until every target CPU has acked by clearing its bit. */
	while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}

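/*
 * Added commentary (an assumption read off the call pattern below):
 * on SGI UV systems the hardware Broadcast Assist Unit can perform
 * the shootdown without IPIs; uv_flush_tlb_others() returns the
 * subset of CPUs it could not cover, and only that subset falls back
 * to the IPI path.
 */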
void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va)
{
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = get_cpu();
		cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
		if (cpumask)
			flush_tlb_others_ipi(cpumask, mm, va);
		put_cpu();
		return;
	}
	flush_tlb_others_ipi(cpumask, mm, va);
}

static int __cpuinit init_smp_flush(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
		spin_lock_init(&flush_state[i].tlbstate_lock);

	return 0;
}
core_initcall(init_smp_flush);

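/*
 * Added note on the idiom used by the flushers below:
 * cpumask_any_but(mask, cpu) returns a CPU number >= nr_cpu_ids when
 * no bit other than cpu is set, so the "< nr_cpu_ids" test reads as
 * "some other CPU is using this mm" and lets single-threaded mms
 * skip the IPI path entirely.
 */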
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			/* Lazy TLB (kernel thread): drop the mm instead. */
			leave_mm(smp_processor_id());
	}
	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			/* Lazy TLB (kernel thread): drop the mm instead. */
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, va);

	preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}
291