xref: /linux/arch/x86/mm/tlb.c (revision c7864eeaa4b743bffe4abb396a03b9d4730195fe)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/init.h>
3 
4 #include <linux/mm.h>
5 #include <linux/spinlock.h>
6 #include <linux/smp.h>
7 #include <linux/interrupt.h>
8 #include <linux/export.h>
9 #include <linux/cpu.h>
10 #include <linux/debugfs.h>
11 #include <linux/sched/smt.h>
12 #include <linux/task_work.h>
13 #include <linux/mmu_notifier.h>
14 #include <linux/mmu_context.h>
15 
16 #include <asm/tlbflush.h>
17 #include <asm/mmu_context.h>
18 #include <asm/nospec-branch.h>
19 #include <asm/cache.h>
20 #include <asm/cacheflush.h>
21 #include <asm/apic.h>
22 #include <asm/msr.h>
23 #include <asm/perf_event.h>
24 #include <asm/tlb.h>
25 
26 #include "mm_internal.h"
27 
28 #ifdef CONFIG_PARAVIRT
29 # define STATIC_NOPV
30 #else
31 # define STATIC_NOPV			static
32 # define __flush_tlb_local		native_flush_tlb_local
33 # define __flush_tlb_global		native_flush_tlb_global
34 # define __flush_tlb_one_user(addr)	native_flush_tlb_one_user(addr)
35 # define __flush_tlb_multi(msk, info)	native_flush_tlb_multi(msk, info)
36 #endif
37 
38 /*
39  *	TLB flushing, formerly SMP-only
40  *		c/o Linus Torvalds.
41  *
42  *	These mean you can really definitely utterly forget about
43  *	writing to user space from interrupts. (It's not allowed anyway).
44  *
45  *	Optimizations Manfred Spraul <manfred@colorfullife.com>
46  *
47  *	More scalable flush, from Andi Kleen
48  *
49  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
50  */
51 
52 /*
53  * Bits to mangle the TIF_SPEC_* state into the mm pointer which is
54  * stored in cpu_tlb_state.last_user_mm_spec.
55  */
56 #define LAST_USER_MM_IBPB	0x1UL
57 #define LAST_USER_MM_L1D_FLUSH	0x2UL
58 #define LAST_USER_MM_SPEC_MASK	(LAST_USER_MM_IBPB | LAST_USER_MM_L1D_FLUSH)
59 
60 /* Bits to set when tlbstate and flush is (re)initialized */
61 #define LAST_USER_MM_INIT	LAST_USER_MM_IBPB
62 
63 /*
64  * The x86 feature is called PCID (Process Context IDentifier). It is similar
65  * to what is traditionally called ASID on the RISC processors.
66  *
67  * We don't use the traditional ASID implementation, where each process/mm gets
68  * its own ASID and flush/restart when we run out of ASID space.
69  *
70  * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
71  * that came by on this CPU, allowing cheaper switch_mm between processes on
72  * this CPU.
73  *
74  * We end up with different spaces for different things. To avoid confusion we
75  * use different names for each of them:
76  *
77  * ASID  - [0, TLB_NR_DYN_ASIDS-1]
78  *         the canonical identifier for an mm, dynamically allocated on each CPU
79  *         [TLB_NR_DYN_ASIDS, MAX_ASID_AVAILABLE-1]
80  *         the canonical, global identifier for an mm, identical across all CPUs
81  *
82  * kPCID - [1, MAX_ASID_AVAILABLE]
83  *         the value we write into the PCID part of CR3; corresponds to the
84  *         ASID+1, because PCID 0 is special.
85  *
86  * uPCID - [2048 + 1, 2048 + MAX_ASID_AVAILABLE]
87  *         for KPTI each mm has two address spaces and thus needs two
88  *         PCID values, but we can still do with a single ASID denomination
89  *         for each mm. Corresponds to kPCID + 2048.
90  *
91  */
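/*
 * Illustrative example (editorial note, derived from the mappings above):
 * dynamic ASID 5 is written to CR3 as kPCID 6 (ASID + 1), and with KPTI
 * its user counterpart is uPCID 2054 (kPCID + 2048, i.e. kPCID with the
 * X86_CR3_PTI_PCID_USER_BIT set).
 */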
92 
93 /*
94  * When enabled, MITIGATION_PAGE_TABLE_ISOLATION consumes a single bit for
95  * user/kernel switches
96  */
97 #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
98 # define PTI_CONSUMED_PCID_BITS	1
99 #else
100 # define PTI_CONSUMED_PCID_BITS	0
101 #endif
102 
103 #define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
104 
105 /*
106  * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid.  -1 below to account
107  * for them being zero-based.  Another -1 is because PCID 0 is reserved for
108  * use by non-PCID-aware users.
109  */
110 #define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
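/*
 * Worked numbers (editorial note, assuming the 12-bit CR3 PCID field):
 * without PTI, CR3_AVAIL_PCID_BITS == 12 and MAX_ASID_AVAILABLE == 4094;
 * with PTI, one bit is consumed for the user/kernel switch and
 * MAX_ASID_AVAILABLE == 2046.
 */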
111 
112 /*
113  * Given @asid, compute kPCID
114  */
115 static inline u16 kern_pcid(u16 asid)
116 {
117 	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
118 
119 #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
120 	/*
121 	 * Make sure that the dynamic ASID space does not conflict with the
122 	 * bit we are using to switch between user and kernel ASIDs.
123 	 */
124 	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
125 
126 	/*
127 	 * The ASID being passed in here should have respected the
128 	 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
129 	 */
130 	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
131 #endif
132 	/*
133 	 * The dynamically-assigned ASIDs that get passed in are small
134 	 * (<TLB_NR_DYN_ASIDS).  They never have the high switch bit set,
135 	 * so do not bother to clear it.
136 	 *
137 	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
138 	 * PCID bits.  This serves two purposes.  It prevents a nasty
139 	 * situation in which PCID-unaware code saves CR3, loads some other
140 	 * value (with PCID == 0), and then restores CR3, thus corrupting
141 	 * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
142 	 * that any bugs involving loading a PCID-enabled CR3 with
143 	 * CR4.PCIDE off will trigger deterministically.
144 	 */
145 	return asid + 1;
146 }
147 
148 /*
149  * Given @asid, compute uPCID
150  */
151 static inline u16 user_pcid(u16 asid)
152 {
153 	u16 ret = kern_pcid(asid);
154 #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
155 	ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
156 #endif
157 	return ret;
158 }
159 
160 static inline unsigned long build_cr3(pgd_t *pgd, u16 asid, unsigned long lam)
161 {
162 	unsigned long cr3 = __sme_pa(pgd) | lam;
163 
164 	if (static_cpu_has(X86_FEATURE_PCID)) {
165 		cr3 |= kern_pcid(asid);
166 	} else {
167 		VM_WARN_ON_ONCE(asid != 0);
168 	}
169 
170 	return cr3;
171 }
172 
173 static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid,
174 					      unsigned long lam)
175 {
176 	/*
177 	 * Use boot_cpu_has() instead of this_cpu_has() as this function
178 	 * might be called during early boot. This should work even after
179 	 * boot because all CPUs have the same capabilities:
180 	 */
181 	VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
182 	return build_cr3(pgd, asid, lam) | CR3_NOFLUSH;
183 }
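/*
 * Editorial sketch of the CR3 value assembled above (assuming PCIDs are
 * enabled): the low 12 bits carry the kPCID, the physical address of the
 * PGD occupies the middle bits (possibly with the SME encryption bit
 * folded in by __sme_pa()), LAM enable bits may be ORed in via @lam, and
 * build_cr3_noflush() additionally sets bit 63 (CR3_NOFLUSH) so the
 * write does not flush TLB entries tagged with that PCID.
 */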
184 
185 /*
186  * We get here when we do something requiring a TLB invalidation
187  * but could not go invalidate all of the contexts.  We do the
188  * necessary invalidation by clearing out the 'ctx_id' which
189  * forces a TLB flush when the context is loaded.
190  */
191 static void clear_asid_other(void)
192 {
193 	u16 asid;
194 
195 	/*
196 	 * This is only expected to be set if we have disabled
197 	 * kernel _PAGE_GLOBAL pages.
198 	 */
199 	if (!static_cpu_has(X86_FEATURE_PTI)) {
200 		WARN_ON_ONCE(1);
201 		return;
202 	}
203 
204 	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
205 		/* Do not need to flush the current asid */
206 		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
207 			continue;
208 		/*
209 		 * Make sure the next time we go to switch to
210 		 * this asid, we do a flush:
211 		 */
212 		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
213 	}
214 	this_cpu_write(cpu_tlbstate.invalidate_other, false);
215 }
216 
217 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
218 
219 struct new_asid {
220 	unsigned int asid	: 16;
221 	unsigned int need_flush : 1;
222 };
223 
224 static struct new_asid choose_new_asid(struct mm_struct *next, u64 next_tlb_gen)
225 {
226 	struct new_asid ns;
227 	u16 asid;
228 
229 	if (!static_cpu_has(X86_FEATURE_PCID)) {
230 		ns.asid = 0;
231 		ns.need_flush = 1;
232 		return ns;
233 	}
234 
235 	/*
236 	 * TLB consistency for global ASIDs is maintained with hardware assisted
237 	 * remote TLB flushing. Global ASIDs are always up to date.
238 	 */
239 	if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
240 		u16 global_asid = mm_global_asid(next);
241 
242 		if (global_asid) {
243 			ns.asid = global_asid;
244 			ns.need_flush = 0;
245 			return ns;
246 		}
247 	}
248 
249 	if (this_cpu_read(cpu_tlbstate.invalidate_other))
250 		clear_asid_other();
251 
252 	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
253 		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
254 		    next->context.ctx_id)
255 			continue;
256 
257 		ns.asid = asid;
258 		ns.need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < next_tlb_gen);
259 		return ns;
260 	}
261 
262 	/*
263 	 * We don't currently own an ASID slot on this CPU.
264 	 * Allocate a slot.
265 	 */
266 	ns.asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
267 	if (ns.asid >= TLB_NR_DYN_ASIDS) {
268 		ns.asid = 0;
269 		this_cpu_write(cpu_tlbstate.next_asid, 1);
270 	}
271 	ns.need_flush = true;
272 
273 	return ns;
274 }
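/*
 * Editorial note on the fallback above: with only TLB_NR_DYN_ASIDS
 * dynamic slots per CPU, allocation is a simple round-robin. Once
 * next_asid runs past the last dynamic slot, this allocation reuses
 * slot 0 and the counter restarts at 1, evicting whatever mm was cached
 * there; need_flush is set so the stale TLB entries for the reused slot
 * are wiped by the subsequent CR3 write.
 */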
275 
276 /*
277  * Global ASIDs are allocated for multi-threaded processes that are
278  * active on multiple CPUs simultaneously, giving each of those
279  * processes the same PCID on every CPU, for use with hardware-assisted
280  * TLB shootdown on remote CPUs, like AMD INVLPGB or Intel RAR.
281  *
282  * These global ASIDs are held for the lifetime of the process.
283  */
284 static DEFINE_RAW_SPINLOCK(global_asid_lock);
285 static u16 last_global_asid = MAX_ASID_AVAILABLE;
286 static DECLARE_BITMAP(global_asid_used, MAX_ASID_AVAILABLE);
287 static DECLARE_BITMAP(global_asid_freed, MAX_ASID_AVAILABLE);
288 static int global_asid_available = MAX_ASID_AVAILABLE - TLB_NR_DYN_ASIDS - 1;
289 
290 /*
291  * When the search for a free ASID in the global ASID space reaches
292  * MAX_ASID_AVAILABLE, a global TLB flush guarantees that previously
293  * freed global ASIDs are safe to re-use.
294  *
295  * This way the global flush only needs to happen at ASID rollover
296  * time, and not at ASID allocation time.
297  */
298 static void reset_global_asid_space(void)
299 {
300 	lockdep_assert_held(&global_asid_lock);
301 
302 	invlpgb_flush_all_nonglobals();
303 
304 	/*
305 	 * The TLB flush above makes it safe to re-use the previously
306 	 * freed global ASIDs.
307 	 */
308 	bitmap_andnot(global_asid_used, global_asid_used,
309 			global_asid_freed, MAX_ASID_AVAILABLE);
310 	bitmap_clear(global_asid_freed, 0, MAX_ASID_AVAILABLE);
311 
312 	/* Restart the search from the start of global ASID space. */
313 	last_global_asid = TLB_NR_DYN_ASIDS;
314 }
315 
316 static u16 allocate_global_asid(void)
317 {
318 	u16 asid;
319 
320 	lockdep_assert_held(&global_asid_lock);
321 
322 	/* The previous allocation hit the edge of available address space */
323 	if (last_global_asid >= MAX_ASID_AVAILABLE - 1)
324 		reset_global_asid_space();
325 
326 	asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE, last_global_asid);
327 
328 	if (asid >= MAX_ASID_AVAILABLE && !global_asid_available) {
329 		/* This should never happen. */
330 		VM_WARN_ONCE(1, "Unable to allocate global ASID despite %d available\n",
331 				global_asid_available);
332 		return 0;
333 	}
334 
335 	/* Claim this global ASID. */
336 	__set_bit(asid, global_asid_used);
337 	last_global_asid = asid;
338 	global_asid_available--;
339 	return asid;
340 }
341 
342 /*
343  * Check whether a process is currently active on more than @threshold CPUs.
344  * This is a cheap estimation on whether or not it may make sense to assign
345  * a global ASID to this process, and use broadcast TLB invalidation.
346  */
347 static bool mm_active_cpus_exceeds(struct mm_struct *mm, int threshold)
348 {
349 	int count = 0;
350 	int cpu;
351 
352 	/* This quick check should eliminate most single threaded programs. */
353 	if (cpumask_weight(mm_cpumask(mm)) <= threshold)
354 		return false;
355 
356 	/* Slower check to make sure. */
357 	for_each_cpu(cpu, mm_cpumask(mm)) {
358 		/* Skip the CPUs that aren't really running this process. */
359 		if (per_cpu(cpu_tlbstate.loaded_mm, cpu) != mm)
360 			continue;
361 
362 		if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
363 			continue;
364 
365 		if (++count > threshold)
366 			return true;
367 	}
368 	return false;
369 }
370 
371 /*
372  * Assign a global ASID to the current process, protecting against
373  * races between multiple threads in the process.
374  */
375 static void use_global_asid(struct mm_struct *mm)
376 {
377 	u16 asid;
378 
379 	guard(raw_spinlock_irqsave)(&global_asid_lock);
380 
381 	/* This process is already using broadcast TLB invalidation. */
382 	if (mm_global_asid(mm))
383 		return;
384 
385 	/*
386 	 * The last global ASID was consumed while waiting for the lock.
387 	 *
388 	 * If this fires, a more aggressive ASID reuse scheme might be
389 	 * needed.
390 	 */
391 	if (!global_asid_available) {
392 		VM_WARN_ONCE(1, "Ran out of global ASIDs\n");
393 		return;
394 	}
395 
396 	asid = allocate_global_asid();
397 	if (!asid)
398 		return;
399 
400 	mm_assign_global_asid(mm, asid);
401 }
402 
403 void mm_free_global_asid(struct mm_struct *mm)
404 {
405 	if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
406 		return;
407 
408 	if (!mm_global_asid(mm))
409 		return;
410 
411 	guard(raw_spinlock_irqsave)(&global_asid_lock);
412 
413 	/* The global ASID can be re-used only after flush at wrap-around. */
414 #ifdef CONFIG_BROADCAST_TLB_FLUSH
415 	__set_bit(mm->context.global_asid, global_asid_freed);
416 
417 	mm->context.global_asid = 0;
418 	global_asid_available++;
419 #endif
420 }
421 
422 /*
423  * Is the mm transitioning from a CPU-local ASID to a global ASID?
424  */
425 static bool mm_needs_global_asid(struct mm_struct *mm, u16 asid)
426 {
427 	u16 global_asid = mm_global_asid(mm);
428 
429 	if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
430 		return false;
431 
432 	/* Process is transitioning to a global ASID */
433 	if (global_asid && asid != global_asid)
434 		return true;
435 
436 	return false;
437 }
438 
439 /*
440  * x86 has 4k ASIDs (2k when compiled with KPTI), but the largest x86
441  * systems have over 8k CPUs. Because of this potential ASID shortage,
442  * global ASIDs are handed out to processes that have frequent TLB
443  * flushes and are active on 4 or more CPUs simultaneously.
444  */
445 static void consider_global_asid(struct mm_struct *mm)
446 {
447 	if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
448 		return;
449 
450 	/* Check every once in a while. */
451 	if ((current->pid & 0x1f) != (jiffies & 0x1f))
452 		return;
453 
454 	/*
455 	 * Assign a global ASID if the process is active on
456 	 * 4 or more CPUs simultaneously.
457 	 */
458 	if (mm_active_cpus_exceeds(mm, 3))
459 		use_global_asid(mm);
460 }
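/*
 * Editorial note: the (current->pid & 0x1f) != (jiffies & 0x1f) test
 * above lets a process evaluate the upgrade only when the low five bits
 * of its PID happen to match the low five bits of jiffies, i.e. roughly
 * one flush call in 32, keeping the cpumask walk in
 * mm_active_cpus_exceeds() off the common fast path.
 */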
461 
462 static void finish_asid_transition(struct flush_tlb_info *info)
463 {
464 	struct mm_struct *mm = info->mm;
465 	int bc_asid = mm_global_asid(mm);
466 	int cpu;
467 
468 	if (!mm_in_asid_transition(mm))
469 		return;
470 
471 	for_each_cpu(cpu, mm_cpumask(mm)) {
472 		/*
473 		 * The remote CPU is context switching. Wait for that to
474 		 * finish, to catch the unlikely case of it switching to
475 		 * the target mm with an out of date ASID.
476 		 */
477 		while (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) == LOADED_MM_SWITCHING)
478 			cpu_relax();
479 
480 		if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) != mm)
481 			continue;
482 
483 		/*
484 		 * If at least one CPU is not using the global ASID yet,
485 		 * send a TLB flush IPI. The IPI should cause stragglers
486 		 * to transition soon.
487 		 *
488 		 * This can race with the CPU switching to another task;
489 		 * that results in a (harmless) extra IPI.
490 		 */
491 		if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm_asid, cpu)) != bc_asid) {
492 			flush_tlb_multi(mm_cpumask(info->mm), info);
493 			return;
494 		}
495 	}
496 
497 	/* All the CPUs running this process are using the global ASID. */
498 	mm_clear_asid_transition(mm);
499 }
500 
501 static void broadcast_tlb_flush(struct flush_tlb_info *info)
502 {
503 	bool pmd = info->stride_shift == PMD_SHIFT;
504 	unsigned long asid = mm_global_asid(info->mm);
505 	unsigned long addr = info->start;
506 
507 	/*
508 	 * TLB flushes with INVLPGB are kicked off asynchronously.
509 	 * The inc_mm_tlb_gen() guarantees page table updates are done
510 	 * before these TLB flushes happen.
511 	 */
512 	if (info->end == TLB_FLUSH_ALL) {
513 		invlpgb_flush_single_pcid_nosync(kern_pcid(asid));
514 		/* Do any CPUs supporting INVLPGB need PTI? */
515 		if (cpu_feature_enabled(X86_FEATURE_PTI))
516 			invlpgb_flush_single_pcid_nosync(user_pcid(asid));
517 	} else do {
518 		unsigned long nr = 1;
519 
520 		if (info->stride_shift <= PMD_SHIFT) {
521 			nr = (info->end - addr) >> info->stride_shift;
522 			nr = clamp_val(nr, 1, invlpgb_count_max);
523 		}
524 
525 		invlpgb_flush_user_nr_nosync(kern_pcid(asid), addr, nr, pmd);
526 		if (cpu_feature_enabled(X86_FEATURE_PTI))
527 			invlpgb_flush_user_nr_nosync(user_pcid(asid), addr, nr, pmd);
528 
529 		addr += nr << info->stride_shift;
530 	} while (addr < info->end);
531 
532 	finish_asid_transition(info);
533 
534 	/* Wait for the INVLPGBs kicked off above to finish. */
535 	__tlbsync();
536 }
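/*
 * Illustrative walk-through of the ranged-flush loop above (editorial
 * note; the invlpgb_count_max value is hypothetical): flushing a 1 MiB
 * range with a 4 KiB stride covers 256 pages. If invlpgb_count_max were,
 * say, 8, the loop would issue 32 INVLPGB operations of 8 pages each
 * (twice that with PTI, once for the kernel and once for the user PCID)
 * and then wait for all of them with the single TLBSYNC at the end.
 */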
537 
538 /*
539  * Given an ASID, flush the corresponding user ASID.  We can delay this
540  * until the next time we switch to it.
541  *
542  * See SWITCH_TO_USER_CR3.
543  */
544 static inline void invalidate_user_asid(u16 asid)
545 {
546 	/* There is no user ASID if address space separation is off */
547 	if (!IS_ENABLED(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION))
548 		return;
549 
550 	/*
551 	 * We only have a single ASID if PCID is off and the CR3
552 	 * write will have flushed it.
553 	 */
554 	if (!cpu_feature_enabled(X86_FEATURE_PCID))
555 		return;
556 
557 	if (!static_cpu_has(X86_FEATURE_PTI))
558 		return;
559 
560 	__set_bit(kern_pcid(asid),
561 		  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
562 }
563 
564 static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, unsigned long lam,
565 			    bool need_flush)
566 {
567 	unsigned long new_mm_cr3;
568 
569 	if (need_flush) {
570 		invalidate_user_asid(new_asid);
571 		new_mm_cr3 = build_cr3(pgdir, new_asid, lam);
572 	} else {
573 		new_mm_cr3 = build_cr3_noflush(pgdir, new_asid, lam);
574 	}
575 
576 	/*
577 	 * Caution: many callers of this function expect
578 	 * that load_cr3() is serializing and orders TLB
579 	 * fills with respect to the mm_cpumask writes.
580 	 */
581 	write_cr3(new_mm_cr3);
582 }
583 
584 void leave_mm(void)
585 {
586 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
587 
588 	/*
589 	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
590 	 * If so, our callers still expect us to flush the TLB, but there
591 	 * aren't any user TLB entries in init_mm to worry about.
592 	 *
593 	 * This needs to happen before any other sanity checks due to
594 	 * intel_idle's shenanigans.
595 	 */
596 	if (loaded_mm == &init_mm)
597 		return;
598 
599 	/* Warn if we're not lazy. */
600 	WARN_ON(!this_cpu_read(cpu_tlbstate_shared.is_lazy));
601 
602 	switch_mm(NULL, &init_mm, NULL);
603 }
604 EXPORT_SYMBOL_GPL(leave_mm);
605 
606 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
607 	       struct task_struct *tsk)
608 {
609 	unsigned long flags;
610 
611 	local_irq_save(flags);
612 	switch_mm_irqs_off(NULL, next, tsk);
613 	local_irq_restore(flags);
614 }
615 
616 /*
617  * Invoked from return to user/guest by a task that opted-in to L1D
618  * flushing but ended up running on an SMT enabled core due to wrong
619  * affinity settings or CPU hotplug. This is part of the paranoid L1D flush
620  * contract which this task requested.
621  */
622 static void l1d_flush_force_sigbus(struct callback_head *ch)
623 {
624 	force_sig(SIGBUS);
625 }
626 
627 static void l1d_flush_evaluate(unsigned long prev_mm, unsigned long next_mm,
628 				struct task_struct *next)
629 {
630 	/* Flush L1D if the outgoing task requests it */
631 	if (prev_mm & LAST_USER_MM_L1D_FLUSH)
632 		wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
633 
634 	/* Check whether the incoming task opted in for L1D flush */
635 	if (likely(!(next_mm & LAST_USER_MM_L1D_FLUSH)))
636 		return;
637 
638 	/*
639 	 * Validate that it is not running on an SMT sibling as this would
640 	 * make the exercise pointless because the siblings share L1D. If
641 	 * it runs on a SMT sibling, notify it with SIGBUS on return to
642 	 * user/guest
643 	 */
644 	if (this_cpu_read(cpu_info.smt_active)) {
645 		clear_ti_thread_flag(&next->thread_info, TIF_SPEC_L1D_FLUSH);
646 		next->l1d_flush_kill.func = l1d_flush_force_sigbus;
647 		task_work_add(next, &next->l1d_flush_kill, TWA_RESUME);
648 	}
649 }
650 
651 static unsigned long mm_mangle_tif_spec_bits(struct task_struct *next)
652 {
653 	unsigned long next_tif = read_task_thread_flags(next);
654 	unsigned long spec_bits = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_SPEC_MASK;
655 
656 	/*
657 	 * Ensure that the bit shift above works as expected and the two flags
658 	 * end up in bit 0 and 1.
659 	 */
660 	BUILD_BUG_ON(TIF_SPEC_L1D_FLUSH != TIF_SPEC_IB + 1);
661 
662 	return (unsigned long)next->mm | spec_bits;
663 }
664 
665 static void cond_mitigation(struct task_struct *next)
666 {
667 	unsigned long prev_mm, next_mm;
668 
669 	if (!next || !next->mm)
670 		return;
671 
672 	next_mm = mm_mangle_tif_spec_bits(next);
673 	prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);
674 
675 	/*
676 	 * Avoid user->user BTB/RSB poisoning by flushing them when switching
677 	 * between processes. This stops one process from doing Spectre-v2
678 	 * attacks on another.
679 	 *
680 	 * Both, the conditional and the always IBPB mode use the mm
681 	 * pointer to avoid the IBPB when switching between tasks of the
682 	 * same process. Using the mm pointer instead of mm->context.ctx_id
683 	 * opens a hypothetical hole vs. mm_struct reuse, which is more or
684 	 * less impossible to control by an attacker. Aside from that it
685 	 * would only affect the first schedule so the theoretically
686 	 * exposed data is not really interesting.
687 	 */
688 	if (static_branch_likely(&switch_mm_cond_ibpb)) {
689 		/*
690 		 * This is a bit more complex than the always mode because
691 		 * it has to handle two cases:
692 		 *
693 		 * 1) Switch from a user space task (potential attacker)
694 		 *    which has TIF_SPEC_IB set to a user space task
695 		 *    (potential victim) which has TIF_SPEC_IB not set.
696 		 *
697 		 * 2) Switch from a user space task (potential attacker)
698 		 *    which has TIF_SPEC_IB not set to a user space task
699 		 *    (potential victim) which has TIF_SPEC_IB set.
700 		 *
701 		 * This could be done by unconditionally issuing IBPB when
702 		 * a task which has TIF_SPEC_IB set is either scheduled in
703 		 * or out. Though that results in two flushes when:
704 		 *
705 		 * - the same user space task is scheduled out and later
706 		 *   scheduled in again and only a kernel thread ran in
707 		 *   between.
708 		 *
709 		 * - a user space task belonging to the same process is
710 		 *   scheduled in after a kernel thread ran in between
711 		 *
712 		 * - a user space task belonging to the same process is
713 		 *   scheduled in immediately.
714 		 *
715 		 * Optimize this with reasonably small overhead for the
716 		 * above cases. Mangle the TIF_SPEC_IB bit into the mm
717 		 * pointer of the incoming task which is stored in
718 		 * cpu_tlbstate.last_user_mm_spec for comparison.
719 		 *
720 		 * Issue IBPB only if the mm's are different and one or
721 		 * both have the IBPB bit set.
722 		 */
723 		if (next_mm != prev_mm &&
724 		    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
725 			indirect_branch_prediction_barrier();
726 	}
727 
728 	if (static_branch_unlikely(&switch_mm_always_ibpb)) {
729 		/*
730 		 * Only flush when switching to a user space task with a
731 		 * different context than the user space task which ran
732 		 * last on this CPU.
733 		 */
734 		if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) != (unsigned long)next->mm)
735 			indirect_branch_prediction_barrier();
736 	}
737 
738 	if (static_branch_unlikely(&switch_mm_cond_l1d_flush)) {
739 		/*
740 		 * Flush L1D when the outgoing task requested it and/or
741 		 * check whether the incoming task requested L1D flushing
742 		 * and ended up on an SMT sibling.
743 		 */
744 		if (unlikely((prev_mm | next_mm) & LAST_USER_MM_L1D_FLUSH))
745 			l1d_flush_evaluate(prev_mm, next_mm, next);
746 	}
747 
748 	this_cpu_write(cpu_tlbstate.last_user_mm_spec, next_mm);
749 }
750 
751 #ifdef CONFIG_PERF_EVENTS
752 static inline void cr4_update_pce_mm(struct mm_struct *mm)
753 {
754 	if (static_branch_unlikely(&rdpmc_always_available_key) ||
755 	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
756 	     atomic_read(&mm->context.perf_rdpmc_allowed))) {
757 		/*
758 		 * Clear the existing dirty counters to
759 		 * prevent the leak for an RDPMC task.
760 		 */
761 		perf_clear_dirty_counters();
762 		cr4_set_bits_irqsoff(X86_CR4_PCE);
763 	} else
764 		cr4_clear_bits_irqsoff(X86_CR4_PCE);
765 }
766 
767 void cr4_update_pce(void *ignored)
768 {
769 	cr4_update_pce_mm(this_cpu_read(cpu_tlbstate.loaded_mm));
770 }
771 
772 #else
773 static inline void cr4_update_pce_mm(struct mm_struct *mm) { }
774 #endif
775 
776 /*
777  * This optimizes when not actually switching mm's.  Some architectures use the
778  * 'unused' argument for this optimization, but x86 must use
779  * 'cpu_tlbstate.loaded_mm' instead because it does not always keep
780  * 'current->active_mm' up to date.
781  */
782 void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
783 			struct task_struct *tsk)
784 {
785 	struct mm_struct *prev = this_cpu_read(cpu_tlbstate.loaded_mm);
786 	u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
787 	bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy);
788 	unsigned cpu = smp_processor_id();
789 	unsigned long new_lam;
790 	struct new_asid ns;
791 	u64 next_tlb_gen;
792 
793 
794 	/* We don't want flush_tlb_func() to run concurrently with us. */
795 	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
796 		WARN_ON_ONCE(!irqs_disabled());
797 
798 	/*
799 	 * Verify that CR3 is what we think it is.  This will catch
800 	 * hypothetical buggy code that directly switches to swapper_pg_dir
801 	 * without going through leave_mm() / switch_mm_irqs_off() or that
802 	 * does something like write_cr3(read_cr3_pa()).
803 	 *
804 	 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
805 	 * isn't free.
806 	 */
807 #ifdef CONFIG_DEBUG_VM
808 	if (WARN_ON_ONCE(__read_cr3() != build_cr3(prev->pgd, prev_asid,
809 						   tlbstate_lam_cr3_mask()))) {
810 		/*
811 		 * If we were to BUG here, we'd be very likely to kill
812 		 * the system so hard that we don't see the call trace.
813 		 * Try to recover instead by ignoring the error and doing
814 		 * a global flush to minimize the chance of corruption.
815 		 *
816 		 * (This is far from being a fully correct recovery.
817 		 *  Architecturally, the CPU could prefetch something
818 		 *  back into an incorrect ASID slot and leave it there
819 		 *  to cause trouble down the road.  It's better than
820 		 *  nothing, though.)
821 		 */
822 		__flush_tlb_all();
823 	}
824 #endif
825 	if (was_lazy)
826 		this_cpu_write(cpu_tlbstate_shared.is_lazy, false);
827 
828 	/*
829 	 * The membarrier system call requires a full memory barrier and
830 	 * core serialization before returning to user-space, after
831 	 * storing to rq->curr, when changing mm.  This is because
832 	 * membarrier() sends IPIs to all CPUs that are in the target mm
833 	 * to make them issue memory barriers.  However, if another CPU
834 	 * switches to/from the target mm concurrently with
835 	 * membarrier(), it can cause that CPU not to receive an IPI
836 	 * when it really should issue a memory barrier.  Writing to CR3
837 	 * provides that full memory barrier and core serializing
838 	 * instruction.
839 	 */
840 	if (prev == next) {
841 		/* Not actually switching mm's */
842 		VM_WARN_ON(is_dyn_asid(prev_asid) &&
843 			   this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
844 			   next->context.ctx_id);
845 
846 		/*
847 		 * If this races with another thread that enables lam, 'new_lam'
848 		 * might not match tlbstate_lam_cr3_mask().
849 		 */
850 
851 		/*
852 		 * Even in lazy TLB mode, the CPU should stay set in the
853 		 * mm_cpumask. The TLB shootdown code can figure out from
854 		 * cpu_tlbstate_shared.is_lazy whether or not to send an IPI.
855 		 */
856 		if (IS_ENABLED(CONFIG_DEBUG_VM) &&
857 		    WARN_ON_ONCE(prev != &init_mm && !is_notrack_mm(prev) &&
858 				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
859 			cpumask_set_cpu(cpu, mm_cpumask(next));
860 
861 		/* Check if the current mm is transitioning to a global ASID */
862 		if (mm_needs_global_asid(next, prev_asid)) {
863 			next_tlb_gen = atomic64_read(&next->context.tlb_gen);
864 			ns = choose_new_asid(next, next_tlb_gen);
865 			goto reload_tlb;
866 		}
867 
868 		/*
869 		 * Broadcast TLB invalidation keeps this ASID up to date
870 		 * all the time.
871 		 */
872 		if (is_global_asid(prev_asid))
873 			return;
874 
875 		/*
876 		 * If the CPU is not in lazy TLB mode, we are just switching
877 		 * from one thread in a process to another thread in the same
878 		 * process. No TLB flush required.
879 		 */
880 		if (!was_lazy)
881 			return;
882 
883 		/*
884 		 * Read the tlb_gen to check whether a flush is needed.
885 		 * If the TLB is up to date, just use it.
886 		 * The barrier synchronizes with the tlb_gen increment in
887 		 * the TLB shootdown code.
888 		 */
889 		smp_mb();
890 		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
891 		if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
892 				next_tlb_gen)
893 			return;
894 
895 		/*
896 		 * TLB contents went out of date while we were in lazy
897 		 * mode. Fall through to the TLB switching code below.
898 		 */
899 		ns.asid = prev_asid;
900 		ns.need_flush = true;
901 	} else {
902 		/*
903 		 * Apply process to process speculation vulnerability
904 		 * mitigations if applicable.
905 		 */
906 		cond_mitigation(tsk);
907 
908 		/*
909 		 * Indicate that CR3 is about to change. nmi_uaccess_okay()
910 		 * and others are sensitive to the window where mm_cpumask(),
911 		 * CR3 and cpu_tlbstate.loaded_mm are not all in sync.
912 		 */
913 		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
914 
915 		/*
916 		 * Make sure this CPU is set in mm_cpumask() such that we'll
917 		 * receive invalidation IPIs.
918 		 *
919 		 * Rely on the smp_mb() implied by cpumask_set_cpu()'s atomic
920 		 * operation, or explicitly provide one. Such that:
921 		 *
922 		 * switch_mm_irqs_off()				flush_tlb_mm_range()
923 		 *   smp_store_release(loaded_mm, SWITCHING);     atomic64_inc_return(tlb_gen)
924 		 *   smp_mb(); // here                            // smp_mb() implied
925 		 *   atomic64_read(tlb_gen);                      this_cpu_read(loaded_mm);
926 		 *
927 		 * we properly order against flush_tlb_mm_range(), where the
928 		 * loaded_mm load can happen in native_flush_tlb_multi() ->
929 		 * should_flush_tlb().
930 		 *
931 		 * This way switch_mm() must see the new tlb_gen or
932 		 * flush_tlb_mm_range() must see the new loaded_mm, or both.
933 		 */
934 		if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next)))
935 			cpumask_set_cpu(cpu, mm_cpumask(next));
936 		else
937 			smp_mb();
938 
939 		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
940 
941 		ns = choose_new_asid(next, next_tlb_gen);
942 	}
943 
944 reload_tlb:
945 	new_lam = mm_lam_cr3_mask(next);
946 	if (ns.need_flush) {
947 		VM_WARN_ON_ONCE(is_global_asid(ns.asid));
948 		this_cpu_write(cpu_tlbstate.ctxs[ns.asid].ctx_id, next->context.ctx_id);
949 		this_cpu_write(cpu_tlbstate.ctxs[ns.asid].tlb_gen, next_tlb_gen);
950 		load_new_mm_cr3(next->pgd, ns.asid, new_lam, true);
951 
952 		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
953 	} else {
954 		/* The new ASID is already up to date. */
955 		load_new_mm_cr3(next->pgd, ns.asid, new_lam, false);
956 
957 		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
958 	}
959 
960 	/* Make sure we write CR3 before loaded_mm. */
961 	barrier();
962 
963 	this_cpu_write(cpu_tlbstate.loaded_mm, next);
964 	this_cpu_write(cpu_tlbstate.loaded_mm_asid, ns.asid);
965 	cpu_tlbstate_update_lam(new_lam, mm_untag_mask(next));
966 
967 	if (next != prev) {
968 		cr4_update_pce_mm(next);
969 		switch_ldt(prev, next);
970 	}
971 }
972 
973 /*
974  * Please ignore the name of this function.  It should be called
975  * switch_to_kernel_thread().
976  *
977  * enter_lazy_tlb() is a hint from the scheduler that we are entering a
978  * kernel thread or other context without an mm.  Acceptable implementations
979  * include doing nothing whatsoever, switching to init_mm, or various clever
980  * lazy tricks to try to minimize TLB flushes.
981  *
982  * The scheduler reserves the right to call enter_lazy_tlb() several times
983  * in a row.  It will notify us that we're going back to a real mm by
984  * calling switch_mm_irqs_off().
985  */
986 void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
987 {
988 	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
989 		return;
990 
991 	this_cpu_write(cpu_tlbstate_shared.is_lazy, true);
992 }
993 
994 /*
995  * Using a temporary mm makes it possible to set temporary mappings that are not accessible
996  * by other CPUs. Such mappings are needed to perform sensitive memory writes
997  * that override the kernel memory protections (e.g., W^X), without exposing the
998  * temporary page-table mappings that are required for these write operations to
999  * other CPUs. Using a temporary mm also avoids TLB shootdowns when the
1000  * mapping is torn down.  Temporary mms can also be used for EFI runtime service
1001  * calls or similar functionality.
1002  *
1003  * It is illegal to schedule while using a temporary mm -- the context switch
1004  * code is unaware of the temporary mm and does not know how to context switch.
1005  * Use a real (non-temporary) mm in a kernel thread if you need to sleep.
1006  *
1007  * Note: For sensitive memory writes, the temporary mm needs to be used
1008  *       exclusively by a single core, and IRQs should be disabled while the
1009  *       temporary mm is loaded, thereby preventing interrupt handler bugs from
1010  *       overriding the kernel memory protection.
1011  */
1012 struct mm_struct *use_temporary_mm(struct mm_struct *temp_mm)
1013 {
1014 	struct mm_struct *prev_mm;
1015 
1016 	lockdep_assert_preemption_disabled();
1017 	guard(irqsave)();
1018 
1019 	/*
1020 	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
1021 	 * with a stale address space WITHOUT being in lazy mode after
1022 	 * restoring the previous mm.
1023 	 */
1024 	if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
1025 		leave_mm();
1026 
1027 	prev_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
1028 	switch_mm_irqs_off(NULL, temp_mm, current);
1029 
1030 	/*
1031 	 * If breakpoints are enabled, disable them while the temporary mm is
1032 	 * used. Userspace might set up watchpoints on addresses that are used
1033 	 * in the temporary mm, which would lead to wrong signals being sent or
1034 	 * crashes.
1035 	 *
1036 	 * Note that breakpoints are not disabled selectively, which also causes
1037 	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
1038 	 * undesirable, but still seems reasonable as the code that runs in the
1039 	 * temporary mm should be short.
1040 	 */
1041 	if (hw_breakpoint_active())
1042 		hw_breakpoint_disable();
1043 
1044 	return prev_mm;
1045 }
1046 
1047 void unuse_temporary_mm(struct mm_struct *prev_mm)
1048 {
1049 	lockdep_assert_preemption_disabled();
1050 	guard(irqsave)();
1051 
1052 	/* Clear the cpumask, to indicate no TLB flushing is needed anywhere */
1053 	cpumask_clear_cpu(smp_processor_id(), mm_cpumask(this_cpu_read(cpu_tlbstate.loaded_mm)));
1054 
1055 	switch_mm_irqs_off(NULL, prev_mm, current);
1056 
1057 	/*
1058 	 * Restore the breakpoints if they were disabled before the temporary mm
1059 	 * was loaded.
1060 	 */
1061 	if (hw_breakpoint_active())
1062 		hw_breakpoint_restore();
1063 }
1064 
1065 /*
1066  * Call this when reinitializing a CPU.  It fixes the following potential
1067  * problems:
1068  *
1069  * - The ASID changed from what cpu_tlbstate thinks it is (most likely
1070  *   because the CPU was taken down and came back up with CR3's PCID
1071  *   bits clear.  CPU hotplug can do this.
1072  *
1073  * - The TLB contains junk in slots corresponding to inactive ASIDs.
1074  *
1075  * - The CPU went so far out to lunch that it may have missed a TLB
1076  *   flush.
1077  */
1078 void initialize_tlbstate_and_flush(void)
1079 {
1080 	int i;
1081 	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
1082 	u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
1083 	unsigned long lam = mm_lam_cr3_mask(mm);
1084 	unsigned long cr3 = __read_cr3();
1085 
1086 	/* Assert that CR3 already references the right mm. */
1087 	WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));
1088 
1089 	/* LAM expected to be disabled */
1090 	WARN_ON(cr3 & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57));
1091 	WARN_ON(lam);
1092 
1093 	/*
1094 	 * Assert that CR4.PCIDE is set if needed.  (CR4.PCIDE initialization
1095 	 * doesn't work like other CR4 bits because it can only be set from
1096 	 * long mode.)
1097 	 */
1098 	WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
1099 		!(cr4_read_shadow() & X86_CR4_PCIDE));
1100 
1101 	/* Disable LAM, force ASID 0 and force a TLB flush. */
1102 	write_cr3(build_cr3(mm->pgd, 0, 0));
1103 
1104 	/* Reinitialize tlbstate. */
1105 	this_cpu_write(cpu_tlbstate.last_user_mm_spec, LAST_USER_MM_INIT);
1106 	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
1107 	this_cpu_write(cpu_tlbstate.next_asid, 1);
1108 	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
1109 	this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);
1110 	cpu_tlbstate_update_lam(lam, mm_untag_mask(mm));
1111 
1112 	for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
1113 		this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
1114 }
1115 
1116 /*
1117  * flush_tlb_func()'s memory ordering requirement is that any
1118  * TLB fills that happen after we flush the TLB are ordered after we
1119  * read active_mm's tlb_gen.  We don't need any explicit barriers
1120  * because all x86 flush operations are serializing and the
1121  * atomic64_read operation won't be reordered by the compiler.
1122  */
1123 static void flush_tlb_func(void *info)
1124 {
1125 	/*
1126 	 * We have three different tlb_gen values in here.  They are:
1127 	 *
1128 	 * - mm_tlb_gen:     the latest generation.
1129 	 * - local_tlb_gen:  the generation that this CPU has already caught
1130 	 *                   up to.
1131 	 * - f->new_tlb_gen: the generation that the requester of the flush
1132 	 *                   wants us to catch up to.
1133 	 */
1134 	const struct flush_tlb_info *f = info;
1135 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
1136 	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
1137 	u64 local_tlb_gen;
1138 	bool local = smp_processor_id() == f->initiating_cpu;
1139 	unsigned long nr_invalidate = 0;
1140 	u64 mm_tlb_gen;
1141 
1142 	/* This code cannot presently handle being reentered. */
1143 	VM_WARN_ON(!irqs_disabled());
1144 
1145 	if (!local) {
1146 		inc_irq_stat(irq_tlb_count);
1147 		count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
1148 	}
1149 
1150 	/* The CPU was left in the mm_cpumask of the target mm. Clear it. */
1151 	if (f->mm && f->mm != loaded_mm) {
1152 		cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(f->mm));
1153 		trace_tlb_flush(TLB_REMOTE_WRONG_CPU, 0);
1154 		return;
1155 	}
1156 
1157 	if (unlikely(loaded_mm == &init_mm))
1158 		return;
1159 
1160 	/* Reload the ASID if transitioning into or out of a global ASID */
1161 	if (mm_needs_global_asid(loaded_mm, loaded_mm_asid)) {
1162 		switch_mm_irqs_off(NULL, loaded_mm, NULL);
1163 		loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
1164 	}
1165 
1166 	/* Broadcast ASIDs are always kept up to date with INVLPGB. */
1167 	if (is_global_asid(loaded_mm_asid))
1168 		return;
1169 
1170 	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
1171 		   loaded_mm->context.ctx_id);
1172 
1173 	if (this_cpu_read(cpu_tlbstate_shared.is_lazy)) {
1174 		/*
1175 		 * We're in lazy mode.  We need to at least flush our
1176 		 * paging-structure cache to avoid speculatively reading
1177 		 * garbage into our TLB.  Since switching to init_mm is barely
1178 		 * slower than a minimal flush, just switch to init_mm.
1179 		 *
1180 		 * This should be rare, with native_flush_tlb_multi() skipping
1181 		 * IPIs to lazy TLB mode CPUs.
1182 		 */
1183 		switch_mm_irqs_off(NULL, &init_mm, NULL);
1184 		return;
1185 	}
1186 
1187 	local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
1188 
1189 	if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID &&
1190 		     f->new_tlb_gen <= local_tlb_gen)) {
1191 		/*
1192 		 * The TLB is already up to date with respect to f->new_tlb_gen.
1193 		 * While the core might be still behind mm_tlb_gen, checking
1194 		 * mm_tlb_gen unnecessarily would have negative caching effects
1195 		 * so avoid it.
1196 		 */
1197 		return;
1198 	}
1199 
1200 	/*
1201 	 * Defer mm_tlb_gen reading as long as possible to avoid cache
1202 	 * contention.
1203 	 */
1204 	mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
1205 
1206 	if (unlikely(local_tlb_gen == mm_tlb_gen)) {
1207 		/*
1208 		 * There's nothing to do: we're already up to date.  This can
1209 		 * happen if two concurrent flushes happen -- the first flush to
1210 		 * be handled can catch us all the way up, leaving no work for
1211 		 * the second flush.
1212 		 */
1213 		goto done;
1214 	}
1215 
1216 	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
1217 	WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);
1218 
1219 	/*
1220 	 * If we get to this point, we know that our TLB is out of date.
1221 	 * This does not strictly imply that we need to flush (it's
1222 	 * possible that f->new_tlb_gen <= local_tlb_gen), but we're
1223 	 * going to need to flush in the very near future, so we might
1224 	 * as well get it over with.
1225 	 *
1226 	 * The only question is whether to do a full or partial flush.
1227 	 *
1228 	 * We do a partial flush if requested and two extra conditions
1229 	 * are met:
1230 	 *
1231 	 * 1. f->new_tlb_gen == local_tlb_gen + 1.  We have an invariant that
1232 	 *    we've always done all needed flushes to catch up to
1233 	 *    local_tlb_gen.  If, for example, local_tlb_gen == 2 and
1234 	 *    f->new_tlb_gen == 3, then we know that the flush needed to bring
1235 	 *    us up to date for tlb_gen 3 is the partial flush we're
1236 	 *    processing.
1237 	 *
1238 	 *    As an example of why this check is needed, suppose that there
1239 	 *    are two concurrent flushes.  The first is a full flush that
1240 	 *    changes context.tlb_gen from 1 to 2.  The second is a partial
1241 	 *    flush that changes context.tlb_gen from 2 to 3.  If they get
1242 	 *    processed on this CPU in reverse order, we'll see
1243 	 *     local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
1244 	 *    If we were to use __flush_tlb_one_user() and set local_tlb_gen to
1245 	 *    3, we'd break the invariant: we'd update local_tlb_gen above
1246 	 *    1 without the full flush that's needed for tlb_gen 2.
1247 	 *
1248 	 * 2. f->new_tlb_gen == mm_tlb_gen.  This is purely an optimization.
1249 	 *    Partial TLB flushes are not all that much cheaper than full TLB
1250 	 *    flushes, so it seems unlikely that it would be a performance win
1251 	 *    to do a partial flush if that won't bring our TLB fully up to
1252 	 *    date.  By doing a full flush instead, we can increase
1253 	 *    local_tlb_gen all the way to mm_tlb_gen and we can probably
1254 	 *    avoid another flush in the very near future.
1255 	 */
1256 	if (f->end != TLB_FLUSH_ALL &&
1257 	    f->new_tlb_gen == local_tlb_gen + 1 &&
1258 	    f->new_tlb_gen == mm_tlb_gen) {
1259 		/* Partial flush */
1260 		unsigned long addr = f->start;
1261 
1262 		/* Partial flush cannot have invalid generations */
1263 		VM_WARN_ON(f->new_tlb_gen == TLB_GENERATION_INVALID);
1264 
1265 		/* Partial flush must have valid mm */
1266 		VM_WARN_ON(f->mm == NULL);
1267 
1268 		nr_invalidate = (f->end - f->start) >> f->stride_shift;
1269 
1270 		while (addr < f->end) {
1271 			flush_tlb_one_user(addr);
1272 			addr += 1UL << f->stride_shift;
1273 		}
1274 		if (local)
1275 			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
1276 	} else {
1277 		/* Full flush. */
1278 		nr_invalidate = TLB_FLUSH_ALL;
1279 
1280 		flush_tlb_local();
1281 		if (local)
1282 			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
1283 	}
1284 
1285 	/* Both paths above update our state to mm_tlb_gen. */
1286 	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
1287 
1288 	/* Tracing is done in a unified manner to reduce the code size */
1289 done:
1290 	trace_tlb_flush(!local ? TLB_REMOTE_SHOOTDOWN :
1291 				(f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN :
1292 						  TLB_LOCAL_MM_SHOOTDOWN,
1293 			nr_invalidate);
1294 }
1295 
1296 static bool should_flush_tlb(int cpu, void *data)
1297 {
1298 	struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu);
1299 	struct flush_tlb_info *info = data;
1300 
1301 	/*
1302 	 * Order the 'loaded_mm' and 'is_lazy' against their
1303 	 * write ordering in switch_mm_irqs_off(). Ensure
1304 	 * 'is_lazy' is at least as new as 'loaded_mm'.
1305 	 */
1306 	smp_rmb();
1307 
1308 	/* Lazy TLB will get flushed at the next context switch. */
1309 	if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
1310 		return false;
1311 
1312 	/* No mm means kernel memory flush. */
1313 	if (!info->mm)
1314 		return true;
1315 
1316 	/*
1317 	 * While switching, the remote CPU could have state from
1318 	 * either the prev or next mm. Assume the worst and flush.
1319 	 */
1320 	if (loaded_mm == LOADED_MM_SWITCHING)
1321 		return true;
1322 
1323 	/* The target mm is loaded, and the CPU is not lazy. */
1324 	if (loaded_mm == info->mm)
1325 		return true;
1326 
1327 	/* In cpumask, but not the loaded mm? Periodically remove by flushing. */
1328 	if (info->trim_cpumask)
1329 		return true;
1330 
1331 	return false;
1332 }
1333 
1334 static bool should_trim_cpumask(struct mm_struct *mm)
1335 {
1336 	if (time_after(jiffies, READ_ONCE(mm->context.next_trim_cpumask))) {
1337 		WRITE_ONCE(mm->context.next_trim_cpumask, jiffies + HZ);
1338 		return true;
1339 	}
1340 	return false;
1341 }
1342 
1343 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
1344 EXPORT_PER_CPU_SYMBOL(cpu_tlbstate_shared);
1345 
1346 STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
1347 					 const struct flush_tlb_info *info)
1348 {
1349 	/*
1350 	 * Do accounting and tracing. Note that there are (and have always been)
1351 	 * cases in which a remote TLB flush will be traced, but eventually
1352 	 * would not happen.
1353 	 */
1354 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
1355 	if (info->end == TLB_FLUSH_ALL)
1356 		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
1357 	else
1358 		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
1359 				(info->end - info->start) >> PAGE_SHIFT);
1360 
1361 	/*
1362 	 * If no page tables were freed, we can skip sending IPIs to
1363 	 * CPUs in lazy TLB mode. They will flush the CPU themselves
1364 	 * at the next context switch.
1365 	 *
1366 	 * However, if page tables are getting freed, we need to send the
1367 	 * IPI everywhere, to prevent CPUs in lazy TLB mode from tripping
1368 	 * up on the new contents of what used to be page tables, while
1369 	 * doing a speculative memory access.
1370 	 */
1371 	if (info->freed_tables || mm_in_asid_transition(info->mm))
1372 		on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
1373 	else
1374 		on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
1375 				(void *)info, 1, cpumask);
1376 }
1377 
1378 void flush_tlb_multi(const struct cpumask *cpumask,
1379 		      const struct flush_tlb_info *info)
1380 {
1381 	__flush_tlb_multi(cpumask, info);
1382 }
1383 
1384 /*
1385  * See Documentation/arch/x86/tlb.rst for details.  We choose 33
1386  * because it is large enough to cover the vast majority (at
1387  * least 95%) of allocations, and is small enough that we are
1388  * confident it will not cause too much overhead.  Each single
1389  * flush is about 100 ns, so this caps the maximum overhead at
1390  * _about_ 3,000 ns.
1391  *
1392  * This is in units of pages.
1393  */
1394 unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
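/*
 * Editorial example: with the default ceiling of 33, a 128 KiB munmap
 * (32 pages at 4 KiB) is flushed page by page, while a 256 KiB one
 * (64 pages) exceeds the ceiling and get_flush_tlb_info() below turns
 * it into a full TLB flush instead.
 */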
1395 
1396 static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);
1397 
1398 #ifdef CONFIG_DEBUG_VM
1399 static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
1400 #endif
1401 
1402 static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
1403 			unsigned long start, unsigned long end,
1404 			unsigned int stride_shift, bool freed_tables,
1405 			u64 new_tlb_gen)
1406 {
1407 	struct flush_tlb_info *info = this_cpu_ptr(&flush_tlb_info);
1408 
1409 #ifdef CONFIG_DEBUG_VM
1410 	/*
1411 	 * Ensure that the following code is non-reentrant and flush_tlb_info
1412 	 * is not overwritten. This means no TLB flushing is initiated by
1413 	 * interrupt handlers and machine-check exception handlers.
1414 	 */
1415 	BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
1416 #endif
1417 
1418 	/*
1419 	 * If the number of flushes is so large that a full flush
1420 	 * would be faster, do a full flush.
1421 	 */
1422 	if ((end - start) >> stride_shift > tlb_single_page_flush_ceiling) {
1423 		start = 0;
1424 		end = TLB_FLUSH_ALL;
1425 	}
1426 
1427 	info->start		= start;
1428 	info->end		= end;
1429 	info->mm		= mm;
1430 	info->stride_shift	= stride_shift;
1431 	info->freed_tables	= freed_tables;
1432 	info->new_tlb_gen	= new_tlb_gen;
1433 	info->initiating_cpu	= smp_processor_id();
1434 	info->trim_cpumask	= 0;
1435 
1436 	return info;
1437 }
1438 
1439 static void put_flush_tlb_info(void)
1440 {
1441 #ifdef CONFIG_DEBUG_VM
1442 	/* Complete reentrancy prevention checks */
1443 	barrier();
1444 	this_cpu_dec(flush_tlb_info_idx);
1445 #endif
1446 }
1447 
1448 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
1449 				unsigned long end, unsigned int stride_shift,
1450 				bool freed_tables)
1451 {
1452 	struct flush_tlb_info *info;
1453 	int cpu = get_cpu();
1454 	u64 new_tlb_gen;
1455 
1456 	/* This is also a barrier that synchronizes with switch_mm(). */
1457 	new_tlb_gen = inc_mm_tlb_gen(mm);
1458 
1459 	info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
1460 				  new_tlb_gen);
1461 
1462 	/*
1463 	 * flush_tlb_multi() is not optimized for the common case in which only
1464 	 * a local TLB flush is needed. Optimize this use-case by calling
1465 	 * flush_tlb_func_local() directly in this case.
1466 	 */
1467 	if (mm_global_asid(mm)) {
1468 		broadcast_tlb_flush(info);
1469 	} else if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
1470 		info->trim_cpumask = should_trim_cpumask(mm);
1471 		flush_tlb_multi(mm_cpumask(mm), info);
1472 		consider_global_asid(mm);
1473 	} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
1474 		lockdep_assert_irqs_enabled();
1475 		local_irq_disable();
1476 		flush_tlb_func(info);
1477 		local_irq_enable();
1478 	}
1479 
1480 	put_flush_tlb_info();
1481 	put_cpu();
1482 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
1483 }
1484 
1485 static void do_flush_tlb_all(void *info)
1486 {
1487 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
1488 	__flush_tlb_all();
1489 }
1490 
1491 void flush_tlb_all(void)
1492 {
1493 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
1494 
1495 	/* First try (faster) hardware-assisted TLB invalidation. */
1496 	if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
1497 		invlpgb_flush_all();
1498 	else
1499 		/* Fall back to the IPI-based invalidation. */
1500 		on_each_cpu(do_flush_tlb_all, NULL, 1);
1501 }
1502 
1503 /* Flush an arbitrarily large range of memory with INVLPGB. */
1504 static void invlpgb_kernel_range_flush(struct flush_tlb_info *info)
1505 {
1506 	unsigned long addr, nr;
1507 
1508 	for (addr = info->start; addr < info->end; addr += nr << PAGE_SHIFT) {
1509 		nr = (info->end - addr) >> PAGE_SHIFT;
1510 
1511 		/*
1512 		 * INVLPGB has a limit on the size of ranges it can
1513 		 * flush. Break up large flushes.
1514 		 */
1515 		nr = clamp_val(nr, 1, invlpgb_count_max);
1516 
1517 		invlpgb_flush_addr_nosync(addr, nr);
1518 	}
1519 	__tlbsync();
1520 }
1521 
1522 static void do_kernel_range_flush(void *info)
1523 {
1524 	struct flush_tlb_info *f = info;
1525 	unsigned long addr;
1526 
1527 	/* Flush the range one page at a time with 'invlpg' */
1528 	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
1529 		flush_tlb_one_kernel(addr);
1530 }
1531 
1532 static void kernel_tlb_flush_all(struct flush_tlb_info *info)
1533 {
1534 	if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
1535 		invlpgb_flush_all();
1536 	else
1537 		on_each_cpu(do_flush_tlb_all, NULL, 1);
1538 }
1539 
1540 static void kernel_tlb_flush_range(struct flush_tlb_info *info)
1541 {
1542 	if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
1543 		invlpgb_kernel_range_flush(info);
1544 	else
1545 		on_each_cpu(do_kernel_range_flush, info, 1);
1546 }
1547 
1548 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
1549 {
1550 	struct flush_tlb_info *info;
1551 
1552 	guard(preempt)();
1553 
1554 	info = get_flush_tlb_info(NULL, start, end, PAGE_SHIFT, false,
1555 				  TLB_GENERATION_INVALID);
1556 
1557 	if (info->end == TLB_FLUSH_ALL)
1558 		kernel_tlb_flush_all(info);
1559 	else
1560 		kernel_tlb_flush_range(info);
1561 
1562 	put_flush_tlb_info();
1563 }
1564 
1565 /*
1566  * This can be used from process context to figure out what the value of
1567  * CR3 is without needing to do a (slow) __read_cr3().
1568  *
1569  * It's intended to be used for code like KVM that sneakily changes CR3
1570  * and needs to restore it.  It needs to be used very carefully.
1571  */
1572 unsigned long __get_current_cr3_fast(void)
1573 {
1574 	unsigned long cr3 =
1575 		build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
1576 			  this_cpu_read(cpu_tlbstate.loaded_mm_asid),
1577 			  tlbstate_lam_cr3_mask());
1578 
1579 	/* For now, be very restrictive about when this can be called. */
1580 	VM_WARN_ON(in_nmi() || preemptible());
1581 
1582 	VM_BUG_ON(cr3 != __read_cr3());
1583 	return cr3;
1584 }
1585 EXPORT_SYMBOL_GPL(__get_current_cr3_fast);
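/*
 * Illustrative sketch (not part of the original file): code that
 * temporarily switches CR3, as the comment above describes for KVM, can
 * snapshot the current value cheaply and restore it afterwards.  The
 * function below is hypothetical and assumes it runs with preemption
 * disabled, as __get_current_cr3_fast() requires.
 */
static void example_borrow_cr3(unsigned long temporary_cr3)
{
	unsigned long saved_cr3 = __get_current_cr3_fast();

	native_write_cr3(temporary_cr3);
	/* ... run with the temporary page tables ... */
	native_write_cr3(saved_cr3);
}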
1586 
1587 /*
1588  * Flush one page in the kernel mapping
1589  */
1590 void flush_tlb_one_kernel(unsigned long addr)
1591 {
1592 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
1593 
1594 	/*
1595 	 * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
1596 	 * paravirt equivalent.  Even with PCID, this is sufficient: we only
1597 	 * use PCID if we also use global PTEs for the kernel mapping, and
1598 	 * INVLPG flushes global translations across all address spaces.
1599 	 *
1600 	 * If PTI is on, then the kernel is mapped with non-global PTEs, and
1601 	 * __flush_tlb_one_user() will flush the given address for the current
1602 	 * kernel address space and for its usermode counterpart, but it does
1603 	 * not flush it for other address spaces.
1604 	 */
1605 	flush_tlb_one_user(addr);
1606 
1607 	if (!static_cpu_has(X86_FEATURE_PTI))
1608 		return;
1609 
1610 	/*
1611 	 * See above.  We need to propagate the flush to all other address
1612 	 * spaces.  In principle, we only need to propagate it to kernelmode
1613 	 * address spaces, but the extra bookkeeping we would need is not
1614 	 * worth it.
1615 	 */
1616 	this_cpu_write(cpu_tlbstate.invalidate_other, true);
1617 }
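/*
 * Illustrative sketch (not part of the original file): when a single
 * kernel PTE changes, only that one address needs to be flushed.  The
 * helper below is hypothetical.
 */
static void example_update_kernel_pte(pte_t *ptep, pte_t pte, unsigned long addr)
{
	set_pte(ptep, pte);		/* update the kernel mapping */
	flush_tlb_one_kernel(addr);	/* drop the stale translation */
}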
1618 
1619 /*
1620  * Flush one page in the user mapping
1621  */
1622 STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
1623 {
1624 	u32 loaded_mm_asid;
1625 	bool cpu_pcide;
1626 
1627 	/* Flush 'addr' from the kernel PCID: */
1628 	invlpg(addr);
1629 
1630 	/* If PTI is off there is no user PCID and nothing to flush. */
1631 	if (!static_cpu_has(X86_FEATURE_PTI))
1632 		return;
1633 
1634 	loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
1635 	cpu_pcide      = this_cpu_read(cpu_tlbstate.cr4) & X86_CR4_PCIDE;
1636 
1637 	/*
1638 	 * invpcid_flush_one(pcid>0) will #GP if CR4.PCIDE==0.  Check
1639 	 * 'cpu_pcide' to ensure that *this* CPU will not trigger those
1640 	 * #GP's even if called before CR4.PCIDE has been initialized.
1641 	 */
1642 	if (boot_cpu_has(X86_FEATURE_INVPCID) && cpu_pcide)
1643 		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
1644 	else
1645 		invalidate_user_asid(loaded_mm_asid);
1646 }
1647 
1648 void flush_tlb_one_user(unsigned long addr)
1649 {
1650 	__flush_tlb_one_user(addr);
1651 }
1652 
1653 /*
1654  * Flush everything
1655  */
1656 STATIC_NOPV void native_flush_tlb_global(void)
1657 {
1658 	unsigned long flags;
1659 
1660 	if (static_cpu_has(X86_FEATURE_INVPCID)) {
1661 		/*
1662 		 * Using INVPCID is considerably faster than a pair of writes
1663 		 * to CR4 sandwiched inside an IRQ flag save/restore.
1664 		 *
1665 		 * Note, this works with CR4.PCIDE=0 or 1.
1666 		 */
1667 		invpcid_flush_all();
1668 		return;
1669 	}
1670 
1671 	/*
1672 	 * Read-modify-write to CR4 - protect it from preemption and
1673 	 * from interrupts. (Use the raw variant because this code can
1674 	 * be called from deep inside debugging code.)
1675 	 */
1676 	raw_local_irq_save(flags);
1677 
1678 	__native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4));
1679 
1680 	raw_local_irq_restore(flags);
1681 }
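/*
 * Illustrative note (not part of the original file): on CPUs without
 * INVPCID, __native_tlb_flush_global() performs the global flush by
 * toggling CR4.PGE, roughly:
 *
 *	native_write_cr4(cr4 ^ X86_CR4_PGE);	// also drops global entries
 *	native_write_cr4(cr4);			// restore the original CR4
 *
 * which is why the read-modify-write above must not be interrupted.
 */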
1682 
1683 /*
1684  * Flush the entire current user mapping
1685  */
1686 STATIC_NOPV void native_flush_tlb_local(void)
1687 {
1688 	/*
1689 	 * Preemption or interrupts must be disabled to protect the access
1690 	 * to the per CPU variable and to prevent being preempted between
1691 	 * read_cr3() and write_cr3().
1692 	 */
1693 	WARN_ON_ONCE(preemptible());
1694 
1695 	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
1696 
1697 	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
1698 	native_write_cr3(__native_read_cr3());
1699 }
1700 
1701 void flush_tlb_local(void)
1702 {
1703 	__flush_tlb_local();
1704 }
1705 
1706 /*
1707  * Flush everything
1708  */
1709 void __flush_tlb_all(void)
1710 {
1711 	/*
1712 	 * Catch callers that have preemption enabled on CPUs with PGE, which
1713 	 * would otherwise never hit the warning in native_flush_tlb_local().
1714 	 */
1715 	VM_WARN_ON_ONCE(preemptible());
1716 
1717 	if (cpu_feature_enabled(X86_FEATURE_PGE)) {
1718 		__flush_tlb_global();
1719 	} else {
1720 		/*
1721 		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
1722 		 */
1723 		flush_tlb_local();
1724 	}
1725 }
1726 EXPORT_SYMBOL_GPL(__flush_tlb_all);
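/*
 * Illustrative sketch (not part of the original file): as the warning
 * above implies, callers are expected to have preemption disabled, e.g.:
 */
static void example_full_flush(void)
{
	preempt_disable();
	__flush_tlb_all();
	preempt_enable();
}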
1727 
1728 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
1729 {
1730 	struct flush_tlb_info *info;
1731 
1732 	int cpu = get_cpu();
1733 
1734 	info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false,
1735 				  TLB_GENERATION_INVALID);
1736 	/*
1737 	 * flush_tlb_multi() is not optimized for the common case in which only
1738 	 * a local TLB flush is needed. Optimize that case by calling
1739 	 * flush_tlb_func() directly instead.
1740 	 */
1741 	if (cpu_feature_enabled(X86_FEATURE_INVLPGB) && batch->unmapped_pages) {
1742 		invlpgb_flush_all_nonglobals();
1743 		batch->unmapped_pages = false;
1744 	} else if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
1745 		flush_tlb_multi(&batch->cpumask, info);
1746 	} else if (cpumask_test_cpu(cpu, &batch->cpumask)) {
1747 		lockdep_assert_irqs_enabled();
1748 		local_irq_disable();
1749 		flush_tlb_func(info);
1750 		local_irq_enable();
1751 	}
1752 
1753 	cpumask_clear(&batch->cpumask);
1754 
1755 	put_flush_tlb_info();
1756 	put_cpu();
1757 }
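/*
 * Illustrative sketch (not part of the original file): the batch is
 * normally filled by the reclaim unmap path via arch_tlbbatch_add_pending()
 * and flushed once at the end, so a single IPI (or INVLPGB broadcast)
 * covers many pages.  The loop below is a hypothetical stand-in for that
 * unmap path.
 */
static void example_batched_unmap(struct arch_tlbflush_unmap_batch *batch,
				  struct mm_struct *mm,
				  unsigned long *addrs, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		arch_tlbbatch_add_pending(batch, mm, addrs[i]);

	arch_tlbbatch_flush(batch);	/* one flush covers all queued work */
}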
1758 
1759 /*
1760  * Blindly accessing user memory from NMI context can be dangerous
1761  * if we're in the middle of switching the current user task or
1762  * switching the loaded mm.  It can also be dangerous if we
1763  * interrupted some kernel code that was temporarily using a
1764  * different mm.
1765  */
1766 bool nmi_uaccess_okay(void)
1767 {
1768 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
1769 	struct mm_struct *current_mm = current->mm;
1770 
1771 	VM_WARN_ON_ONCE(!loaded_mm);
1772 
1773 	/*
1774 	 * The condition we want to check is
1775 	 * current_mm->pgd == __va(read_cr3_pa()).  This may be slow, though,
1776 	 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
1777 	 * is supposed to be reasonably fast.
1778 	 *
1779 	 * Instead, we check the almost equivalent but somewhat conservative
1780 	 * condition below, and we rely on the fact that switch_mm_irqs_off()
1781 	 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
1782 	 */
1783 	if (loaded_mm != current_mm)
1784 		return false;
1785 
1786 	VM_WARN_ON_ONCE(__pa(current_mm->pgd) != read_cr3_pa());
1787 
1788 	return true;
1789 }
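/*
 * Illustrative sketch (not part of the original file): an NMI handler that
 * wants to touch user memory (e.g. for stack unwinding) should bail out
 * when the check above fails.  The handler below is hypothetical;
 * nmi_uaccess_okay() and copy_from_user_nmi() are real.
 */
static int example_nmi_read_user(const void __user *uptr, unsigned long *val)
{
	if (!nmi_uaccess_okay())
		return -EFAULT;

	/* copy_from_user_nmi() returns the number of bytes NOT copied. */
	return copy_from_user_nmi(val, uptr, sizeof(*val)) ? -EFAULT : 0;
}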
1790 
1791 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
1792 			     size_t count, loff_t *ppos)
1793 {
1794 	char buf[32];
1795 	unsigned int len;
1796 
1797 	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
1798 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1799 }
1800 
1801 static ssize_t tlbflush_write_file(struct file *file,
1802 		 const char __user *user_buf, size_t count, loff_t *ppos)
1803 {
1804 	char buf[32];
1805 	ssize_t len;
1806 	int ceiling;
1807 
1808 	len = min(count, sizeof(buf) - 1);
1809 	if (copy_from_user(buf, user_buf, len))
1810 		return -EFAULT;
1811 
1812 	buf[len] = '\0';
1813 	if (kstrtoint(buf, 0, &ceiling))
1814 		return -EINVAL;
1815 
1816 	if (ceiling < 0)
1817 		return -EINVAL;
1818 
1819 	tlb_single_page_flush_ceiling = ceiling;
1820 	return count;
1821 }
1822 
1823 static const struct file_operations fops_tlbflush = {
1824 	.read = tlbflush_read_file,
1825 	.write = tlbflush_write_file,
1826 	.llseek = default_llseek,
1827 };
1828 
1829 static int __init create_tlb_single_page_flush_ceiling(void)
1830 {
1831 	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
1832 			    arch_debugfs_dir, NULL, &fops_tlbflush);
1833 	return 0;
1834 }
1835 late_initcall(create_tlb_single_page_flush_ceiling);
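/*
 * Illustrative note (not part of the original file): assuming debugfs is
 * mounted at /sys/kernel/debug, the knob created above can be inspected
 * and tuned from userspace, e.g.:
 *
 *	cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	echo 64 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *
 * Ranges spanning more pages than this ceiling are handled with a full
 * TLB flush rather than per-page invalidations.
 */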
1836