// SPDX-License-Identifier: GPL-2.0-only
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/sched/smt.h>
#include <linux/task_work.h>
#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlb.h>

#include "mm_internal.h"

#ifdef CONFIG_PARAVIRT
# define STATIC_NOPV
#else
# define STATIC_NOPV			static
# define __flush_tlb_local		native_flush_tlb_local
# define __flush_tlb_global		native_flush_tlb_global
# define __flush_tlb_one_user(addr)	native_flush_tlb_one_user(addr)
# define __flush_tlb_multi(msk, info)	native_flush_tlb_multi(msk, info)
#endif

/*
 *	TLB flushing, formerly SMP-only
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

/*
 * Bits to mangle the TIF_SPEC_* state into the mm pointer which is
 * stored in cpu_tlbstate.last_user_mm_spec.
 */
#define LAST_USER_MM_IBPB	0x1UL
#define LAST_USER_MM_L1D_FLUSH	0x2UL
#define LAST_USER_MM_SPEC_MASK	(LAST_USER_MM_IBPB | LAST_USER_MM_L1D_FLUSH)

/* Bits to set when tlbstate and flush are (re)initialized */
#define LAST_USER_MM_INIT	LAST_USER_MM_IBPB

/*
 * The x86 feature is called PCID (Process Context IDentifier). It is similar
 * to what is traditionally called ASID on the RISC processors.
 *
 * We don't use the traditional ASID implementation, where each process/mm gets
 * its own ASID and flush/restart when we run out of ASID space.
 *
 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
 * that came by on this CPU, allowing cheaper switch_mm between processes on
 * this CPU.
 *
 * We end up with different spaces for different things. To avoid confusion we
 * use different names for each of them:
 *
 * ASID  - [0, TLB_NR_DYN_ASIDS-1]
 *         the canonical identifier for an mm
 *
 * kPCID - [1, TLB_NR_DYN_ASIDS]
 *         the value we write into the PCID part of CR3; corresponds to the
 *         ASID+1, because PCID 0 is special.
 *
 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
 *         for KPTI each mm has two address spaces and thus needs two
 *         PCID values, but we can still do with a single ASID denomination
 *         for each mm. Corresponds to kPCID + 2048.
 *
 */

/*
 * When enabled, MITIGATION_PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches
 */
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
# define PTI_CONSUMED_PCID_BITS	1
#else
# define PTI_CONSUMED_PCID_BITS	0
#endif

#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)

/*
 * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid. -1 below to account
 * for them being zero-based. Another -1 is because PCID 0 is reserved for
 * use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
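/*
 * Illustrative only: plugging in the x86-64 constant X86_CR3_PCID_BITS == 12,
 * a PTI-enabled kernel has CR3_AVAIL_PCID_BITS == 11, so:
 *
 *	MAX_ASID_AVAILABLE == (1 << 11) - 2 == 2046
 *
 * One value is lost to the zero-based numbering and one to the reserved
 * PCID 0, exactly as derived above.
 */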
/*
 * Given @asid, compute kPCID
 */
static inline u16 kern_pcid(u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
	/*
	 * Make sure that the dynamic ASID space does not conflict with the
	 * bit we are using to switch between user and kernel ASIDs.
	 */
	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));

	/*
	 * The ASID being passed in here should have respected the
	 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
	 */
	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
#endif
	/*
	 * The dynamically-assigned ASIDs that get passed in are small
	 * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set,
	 * so do not bother to clear it.
	 *
	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
	 * PCID bits. This serves two purposes. It prevents a nasty
	 * situation in which PCID-unaware code saves CR3, loads some other
	 * value (with PCID == 0), and then restores CR3, thus corrupting
	 * the TLB for ASID 0 if the saved ASID was nonzero. It also means
	 * that any bugs involving loading a PCID-enabled CR3 with
	 * CR4.PCIDE off will trigger deterministically.
	 */
	return asid + 1;
}

/*
 * Given @asid, compute uPCID
 */
static inline u16 user_pcid(u16 asid)
{
	u16 ret = kern_pcid(asid);
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
	ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
#endif
	return ret;
}

static inline unsigned long build_cr3(pgd_t *pgd, u16 asid, unsigned long lam)
{
	unsigned long cr3 = __sme_pa(pgd) | lam;

	if (static_cpu_has(X86_FEATURE_PCID)) {
		cr3 |= kern_pcid(asid);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
	}

	return cr3;
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid,
					      unsigned long lam)
{
	/*
	 * Use boot_cpu_has() instead of this_cpu_has() as this function
	 * might be called during early boot. This should work even after
	 * boot because all CPUs have the same capabilities:
	 */
	VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
	return build_cr3(pgd, asid, lam) | CR3_NOFLUSH;
}
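/*
 * Illustrative only: a minimal userspace sketch of the kern_pcid()/
 * user_pcid()/build_cr3() arithmetic above, assuming the x86-64 constants
 * X86_CR3_PTI_PCID_USER_BIT == 11 and CR3_NOFLUSH == 1 << 63. Kept under
 * "#if 0" so it is never compiled; it is not part of the kernel proper.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define PTI_PCID_USER_BIT	11
#define NOFLUSH			(1ULL << 63)

static uint16_t sketch_kern_pcid(uint16_t asid) { return asid + 1; }
static uint16_t sketch_user_pcid(uint16_t asid)
{
	return sketch_kern_pcid(asid) | (1 << PTI_PCID_USER_BIT);
}

int main(void)
{
	uint64_t pgd_pa = 0x1234000;	/* hypothetical pgd physical address */

	/* ASID 0 -> kPCID 1, uPCID 2049, as in the table near the top */
	printf("kPCID=%d uPCID=%d\n",
	       sketch_kern_pcid(0), sketch_user_pcid(0));
	/* A no-flush CR3 value: pgd | kPCID | bit 63 */
	printf("cr3=%#llx\n",
	       (unsigned long long)(pgd_pa | sketch_kern_pcid(0) | NOFLUSH));
	return 0;
}
#endif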
/*
 * We get here when we do something requiring a TLB invalidation
 * but could not go invalidate all of the contexts. We do the
 * necessary invalidation by clearing out the 'ctx_id' which
 * forces a TLB flush when the context is loaded.
 */
static void clear_asid_other(void)
{
	u16 asid;

	/*
	 * This is only expected to be set if we have disabled
	 * kernel _PAGE_GLOBAL pages.
	 */
	if (!static_cpu_has(X86_FEATURE_PTI)) {
		WARN_ON_ONCE(1);
		return;
	}

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		/* Do not need to flush the current asid */
		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
			continue;
		/*
		 * Make sure the next time we go to switch to
		 * this asid, we do a flush:
		 */
		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
	}
	this_cpu_write(cpu_tlbstate.invalidate_other, false);
}

atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);


static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
			    u16 *new_asid, bool *need_flush)
{
	u16 asid;

	if (!static_cpu_has(X86_FEATURE_PCID)) {
		*new_asid = 0;
		*need_flush = true;
		return;
	}

	if (this_cpu_read(cpu_tlbstate.invalidate_other))
		clear_asid_other();

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
		    next->context.ctx_id)
			continue;

		*new_asid = asid;
		*need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
			       next_tlb_gen);
		return;
	}

	/*
	 * We don't currently own an ASID slot on this CPU.
	 * Allocate a slot.
	 */
	*new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
	if (*new_asid >= TLB_NR_DYN_ASIDS) {
		*new_asid = 0;
		this_cpu_write(cpu_tlbstate.next_asid, 1);
	}
	*need_flush = true;
}

/*
 * Given an ASID, flush the corresponding user ASID. We can delay this
 * until the next time we switch to it.
 *
 * See SWITCH_TO_USER_CR3.
 */
static inline void invalidate_user_asid(u16 asid)
{
	/* There is no user ASID if address space separation is off */
	if (!IS_ENABLED(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION))
		return;

	/*
	 * We only have a single ASID if PCID is off and the CR3
	 * write will have flushed it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_PCID))
		return;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	__set_bit(kern_pcid(asid),
		  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
}

static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, unsigned long lam,
			    bool need_flush)
{
	unsigned long new_mm_cr3;

	if (need_flush) {
		invalidate_user_asid(new_asid);
		new_mm_cr3 = build_cr3(pgdir, new_asid, lam);
	} else {
		new_mm_cr3 = build_cr3_noflush(pgdir, new_asid, lam);
	}

	/*
	 * Caution: many callers of this function expect
	 * that load_cr3() is serializing and orders TLB
	 * fills with respect to the mm_cpumask writes.
	 */
	write_cr3(new_mm_cr3);
}
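/*
 * Illustrative only, assuming TLB_NR_DYN_ASIDS == 6 as in current
 * <asm/tlbflush.h>: choose_new_asid() treats the six per-CPU ASID slots
 * as a small cache of recently-run mms. If this CPU last ran mms A..F in
 * slots 0..5 and a seventh mm G arrives, 'next_asid' picks the next slot
 * round-robin style, G evicts that slot, and *need_flush is forced true.
 * Switching back to a still-cached mm reuses its slot and flushes only
 * if the slot's tlb_gen fell behind the mm's tlb_gen.
 */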
void leave_mm(void)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
	 * If so, our callers still expect us to flush the TLB, but there
	 * aren't any user TLB entries in init_mm to worry about.
	 *
	 * This needs to happen before any other sanity checks due to
	 * intel_idle's shenanigans.
	 */
	if (loaded_mm == &init_mm)
		return;

	/* Warn if we're not lazy. */
	WARN_ON(!this_cpu_read(cpu_tlbstate_shared.is_lazy));

	switch_mm(NULL, &init_mm, NULL);
}
EXPORT_SYMBOL_GPL(leave_mm);

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(NULL, next, tsk);
	local_irq_restore(flags);
}

/*
 * Invoked from return to user/guest by a task that opted-in to L1D
 * flushing but ended up running on an SMT enabled core due to wrong
 * affinity settings or CPU hotplug. This is part of the paranoid L1D flush
 * contract which this task requested.
 */
static void l1d_flush_force_sigbus(struct callback_head *ch)
{
	force_sig(SIGBUS);
}

static void l1d_flush_evaluate(unsigned long prev_mm, unsigned long next_mm,
			       struct task_struct *next)
{
	/* Flush L1D if the outgoing task requests it */
	if (prev_mm & LAST_USER_MM_L1D_FLUSH)
		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);

	/* Check whether the incoming task opted in for L1D flush */
	if (likely(!(next_mm & LAST_USER_MM_L1D_FLUSH)))
		return;

	/*
	 * Validate that it is not running on an SMT sibling as this would
	 * make the exercise pointless because the siblings share L1D. If
	 * it runs on an SMT sibling, notify it with SIGBUS on return to
	 * user/guest
	 */
	if (this_cpu_read(cpu_info.smt_active)) {
		clear_ti_thread_flag(&next->thread_info, TIF_SPEC_L1D_FLUSH);
		next->l1d_flush_kill.func = l1d_flush_force_sigbus;
		task_work_add(next, &next->l1d_flush_kill, TWA_RESUME);
	}
}

static unsigned long mm_mangle_tif_spec_bits(struct task_struct *next)
{
	unsigned long next_tif = read_task_thread_flags(next);
	unsigned long spec_bits = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_SPEC_MASK;

	/*
	 * Ensure that the bit shift above works as expected and the two flags
	 * end up in bit 0 and 1.
	 */
	BUILD_BUG_ON(TIF_SPEC_L1D_FLUSH != TIF_SPEC_IB + 1);

	return (unsigned long)next->mm | spec_bits;
}
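/*
 * Illustrative only: with the adjacency enforced by the BUILD_BUG_ON
 * above, the shift in mm_mangle_tif_spec_bits() lands the two thread
 * flags in bits 0 and 1 of the returned value:
 *
 *	(next_tif >> TIF_SPEC_IB) & LAST_USER_MM_SPEC_MASK
 *		bit 0: LAST_USER_MM_IBPB	(from TIF_SPEC_IB)
 *		bit 1: LAST_USER_MM_L1D_FLUSH	(from TIF_SPEC_L1D_FLUSH)
 *
 * This relies on mm_struct pointers being at least 4-byte aligned, so
 * ORing the two flag bits into the pointer loses no information.
 */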
static void cond_mitigation(struct task_struct *next)
{
	unsigned long prev_mm, next_mm;

	if (!next || !next->mm)
		return;

	next_mm = mm_mangle_tif_spec_bits(next);
	prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);

	/*
	 * Avoid user/user BTB poisoning by flushing the branch predictor
	 * when switching between processes. This stops one process from
	 * doing Spectre-v2 attacks on another.
	 *
	 * Both the conditional and the always IBPB mode use the mm
	 * pointer to avoid the IBPB when switching between tasks of the
	 * same process. Using the mm pointer instead of mm->context.ctx_id
	 * opens a hypothetical hole vs. mm_struct reuse, which is more or
	 * less impossible to control by an attacker. Aside from that, it
	 * would only affect the first schedule, so the theoretically
	 * exposed data is not really interesting.
	 */
	if (static_branch_likely(&switch_mm_cond_ibpb)) {
		/*
		 * This is a bit more complex than the always mode because
		 * it has to handle two cases:
		 *
		 * 1) Switch from a user space task (potential attacker)
		 *    which has TIF_SPEC_IB set to a user space task
		 *    (potential victim) which has TIF_SPEC_IB not set.
		 *
		 * 2) Switch from a user space task (potential attacker)
		 *    which has TIF_SPEC_IB not set to a user space task
		 *    (potential victim) which has TIF_SPEC_IB set.
		 *
		 * This could be done by unconditionally issuing IBPB when
		 * a task which has TIF_SPEC_IB set is either scheduled in
		 * or out. Though that results in two flushes when:
		 *
		 * - the same user space task is scheduled out and later
		 *   scheduled in again and only a kernel thread ran in
		 *   between.
		 *
		 * - a user space task belonging to the same process is
		 *   scheduled in after a kernel thread ran in between
		 *
		 * - a user space task belonging to the same process is
		 *   scheduled in immediately.
		 *
		 * Optimize this with reasonably small overhead for the
		 * above cases. Mangle the TIF_SPEC_IB bit into the mm
		 * pointer of the incoming task which is stored in
		 * cpu_tlbstate.last_user_mm_spec for comparison.
		 *
		 * Issue IBPB only if the mm's are different and one or
		 * both have the IBPB bit set.
		 */
		if (next_mm != prev_mm &&
		    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
			indirect_branch_prediction_barrier();
	}

	if (static_branch_unlikely(&switch_mm_always_ibpb)) {
		/*
		 * Only flush when switching to a user space task with a
		 * different context than the user space task which ran
		 * last on this CPU.
		 */
		if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) !=
					(unsigned long)next->mm)
			indirect_branch_prediction_barrier();
	}

	if (static_branch_unlikely(&switch_mm_cond_l1d_flush)) {
		/*
		 * Flush L1D when the outgoing task requested it and/or
		 * check whether the incoming task requested L1D flushing
		 * and ended up on an SMT sibling.
		 */
		if (unlikely((prev_mm | next_mm) & LAST_USER_MM_L1D_FLUSH))
			l1d_flush_evaluate(prev_mm, next_mm, next);
	}

	this_cpu_write(cpu_tlbstate.last_user_mm_spec, next_mm);
}

#ifdef CONFIG_PERF_EVENTS
static inline void cr4_update_pce_mm(struct mm_struct *mm)
{
	if (static_branch_unlikely(&rdpmc_always_available_key) ||
	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
	     atomic_read(&mm->context.perf_rdpmc_allowed))) {
		/*
		 * Clear the existing dirty counters to
		 * prevent the leak for an RDPMC task.
		 */
		perf_clear_dirty_counters();
		cr4_set_bits_irqsoff(X86_CR4_PCE);
	} else
		cr4_clear_bits_irqsoff(X86_CR4_PCE);
}

void cr4_update_pce(void *ignored)
{
	cr4_update_pce_mm(this_cpu_read(cpu_tlbstate.loaded_mm));
}

#else
static inline void cr4_update_pce_mm(struct mm_struct *mm) { }
#endif
/*
 * This optimizes when not actually switching mm's. Some architectures use the
 * 'unused' argument for this optimization, but x86 must use
 * 'cpu_tlbstate.loaded_mm' instead because it does not always keep
 * 'current->active_mm' up to date.
 */
void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
			struct task_struct *tsk)
{
	struct mm_struct *prev = this_cpu_read(cpu_tlbstate.loaded_mm);
	u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy);
	unsigned cpu = smp_processor_id();
	unsigned long new_lam;
	u64 next_tlb_gen;
	bool need_flush;
	u16 new_asid;

	/* We don't want flush_tlb_func() to run concurrently with us. */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(!irqs_disabled());

	/*
	 * Verify that CR3 is what we think it is. This will catch
	 * hypothetical buggy code that directly switches to swapper_pg_dir
	 * without going through leave_mm() / switch_mm_irqs_off() or that
	 * does something like write_cr3(read_cr3_pa()).
	 *
	 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
	 * isn't free.
	 */
#ifdef CONFIG_DEBUG_VM
	if (WARN_ON_ONCE(__read_cr3() != build_cr3(prev->pgd, prev_asid,
						   tlbstate_lam_cr3_mask()))) {
		/*
		 * If we were to BUG here, we'd be very likely to kill
		 * the system so hard that we don't see the call trace.
		 * Try to recover instead by ignoring the error and doing
		 * a global flush to minimize the chance of corruption.
		 *
		 * (This is far from being a fully correct recovery.
		 *  Architecturally, the CPU could prefetch something
		 *  back into an incorrect ASID slot and leave it there
		 *  to cause trouble down the road. It's better than
		 *  nothing, though.)
		 */
		__flush_tlb_all();
	}
#endif
	if (was_lazy)
		this_cpu_write(cpu_tlbstate_shared.is_lazy, false);

	/*
	 * The membarrier system call requires a full memory barrier and
	 * core serialization before returning to user-space, after
	 * storing to rq->curr, when changing mm. This is because
	 * membarrier() sends IPIs to all CPUs that are in the target mm
	 * to make them issue memory barriers. However, if another CPU
	 * switches to/from the target mm concurrently with
	 * membarrier(), it can cause that CPU not to receive an IPI
	 * when it really should issue a memory barrier. Writing to CR3
	 * provides that full memory barrier and core serializing
	 * instruction.
	 */
	if (prev == next) {
		/* Not actually switching mm's */
		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
			   next->context.ctx_id);

		/*
		 * If this races with another thread that enables lam, 'new_lam'
		 * might not match tlbstate_lam_cr3_mask().
		 */

		/*
		 * Even in lazy TLB mode, the CPU should stay set in the
		 * mm_cpumask. The TLB shootdown code can figure out from
		 * cpu_tlbstate_shared.is_lazy whether or not to send an IPI.
		 */
		if (IS_ENABLED(CONFIG_DEBUG_VM) && WARN_ON_ONCE(prev != &init_mm &&
				!cpumask_test_cpu(cpu, mm_cpumask(next))))
			cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * If the CPU is not in lazy TLB mode, we are just switching
		 * from one thread in a process to another thread in the same
		 * process. No TLB flush required.
		 */
		if (!was_lazy)
			return;

		/*
		 * Read the tlb_gen to check whether a flush is needed.
		 * If the TLB is up to date, just use it.
		 * The barrier synchronizes with the tlb_gen increment in
		 * the TLB shootdown code.
		 */
		smp_mb();
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
		if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
				next_tlb_gen)
			return;

		/*
		 * TLB contents went out of date while we were in lazy
		 * mode. Fall through to the TLB switching code below.
		 */
		new_asid = prev_asid;
		need_flush = true;
	} else {
		/*
		 * Apply process to process speculation vulnerability
		 * mitigations if applicable.
		 */
		cond_mitigation(tsk);

		/*
		 * Stop remote flushes for the previous mm.
		 * Skip kernel threads; we never send init_mm TLB flushing IPIs,
		 * but the bitmap manipulation can cause cache line contention.
		 */
		if (prev != &init_mm) {
			VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
						mm_cpumask(prev)));
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
		}

		/* Start receiving IPIs and then read tlb_gen (and LAM below) */
		if (next != &init_mm)
			cpumask_set_cpu(cpu, mm_cpumask(next));
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);

		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);

		/* Let nmi_uaccess_okay() know that we're changing CR3. */
		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
		barrier();
	}

	new_lam = mm_lam_cr3_mask(next);
	if (need_flush) {
		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
		load_new_mm_cr3(next->pgd, new_asid, new_lam, true);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	} else {
		/* The new ASID is already up to date. */
		load_new_mm_cr3(next->pgd, new_asid, new_lam, false);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
	}

	/* Make sure we write CR3 before loaded_mm. */
	barrier();

	this_cpu_write(cpu_tlbstate.loaded_mm, next);
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
	cpu_tlbstate_update_lam(new_lam, mm_untag_mask(next));

	if (next != prev) {
		cr4_update_pce_mm(next);
		switch_ldt(prev, next);
	}
}

/*
 * Please ignore the name of this function. It should be called
 * switch_to_kernel_thread().
 *
 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
 * kernel thread or other context without an mm. Acceptable implementations
 * include doing nothing whatsoever, switching to init_mm, or various clever
 * lazy tricks to try to minimize TLB flushes.
 *
 * The scheduler reserves the right to call enter_lazy_tlb() several times
 * in a row. It will notify us that we're going back to a real mm by
 * calling switch_mm_irqs_off().
 */
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
		return;

	this_cpu_write(cpu_tlbstate_shared.is_lazy, true);
}
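/*
 * Illustrative only: a sketch of the lazy-TLB life cycle as seen by
 * this file, not additional code:
 *
 *	switch_mm_irqs_off(user mm)  -> is_lazy = false, CR3 = user pgd
 *	enter_lazy_tlb()             -> is_lazy = true, CR3 left alone
 *	flush IPI while lazy         -> switch to init_mm rather than
 *	                                flush (see flush_tlb_func())
 *	switch_mm_irqs_off(same mm)  -> catch up on tlb_gen if it became
 *	                                stale while lazy
 */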
/*
 * Call this when reinitializing a CPU. It fixes the following potential
 * problems:
 *
 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
 *   because the CPU was taken down and came back up with CR3's PCID
 *   bits clear. CPU hotplug can do this.
 *
 * - The TLB contains junk in slots corresponding to inactive ASIDs.
 *
 * - The CPU went so far out to lunch that it may have missed a TLB
 *   flush.
 */
void initialize_tlbstate_and_flush(void)
{
	int i;
	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
	unsigned long lam = mm_lam_cr3_mask(mm);
	unsigned long cr3 = __read_cr3();

	/* Assert that CR3 already references the right mm. */
	WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));

	/* LAM expected to be disabled */
	WARN_ON(cr3 & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57));
	WARN_ON(lam);

	/*
	 * Assert that CR4.PCIDE is set if needed. (CR4.PCIDE initialization
	 * doesn't work like other CR4 bits because it can only be set from
	 * long mode.)
	 */
	WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
		!(cr4_read_shadow() & X86_CR4_PCIDE));

	/* Disable LAM, force ASID 0 and force a TLB flush. */
	write_cr3(build_cr3(mm->pgd, 0, 0));

	/* Reinitialize tlbstate. */
	this_cpu_write(cpu_tlbstate.last_user_mm_spec, LAST_USER_MM_INIT);
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
	this_cpu_write(cpu_tlbstate.next_asid, 1);
	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
	this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);
	cpu_tlbstate_update_lam(lam, mm_untag_mask(mm));

	for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
		this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
}
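/*
 * Illustrative only: a worked example of how the three generation
 * numbers used by flush_tlb_func() below interact. Suppose two flushes
 * were requested, so mm_tlb_gen == 3:
 *
 *	local_tlb_gen == 1, f->new_tlb_gen == 2: the requested partial
 *	flush cannot catch us up to generation 3, so do a full flush
 *	and advance local_tlb_gen straight to 3.
 *
 *	local_tlb_gen == 2, f->new_tlb_gen == 3 == mm_tlb_gen: the
 *	requested (possibly ranged) flush alone catches us up.
 */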
/*
 * flush_tlb_func()'s memory ordering requirement is that any
 * TLB fills that happen after we flush the TLB are ordered after we
 * read active_mm's tlb_gen. We don't need any explicit barriers
 * because all x86 flush operations are serializing and the
 * atomic64_read operation won't be reordered by the compiler.
 */
static void flush_tlb_func(void *info)
{
	/*
	 * We have three different tlb_gen values in here. They are:
	 *
	 * - mm_tlb_gen:     the latest generation.
	 * - local_tlb_gen:  the generation that this CPU has already caught
	 *                   up to.
	 * - f->new_tlb_gen: the generation that the requester of the flush
	 *                   wants us to catch up to.
	 */
	const struct flush_tlb_info *f = info;
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
	bool local = smp_processor_id() == f->initiating_cpu;
	unsigned long nr_invalidate = 0;
	u64 mm_tlb_gen;

	/* This code cannot presently handle being reentered. */
	VM_WARN_ON(!irqs_disabled());

	if (!local) {
		inc_irq_stat(irq_tlb_count);
		count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);

		/* Can only happen on remote CPUs */
		if (f->mm && f->mm != loaded_mm)
			return;
	}

	if (unlikely(loaded_mm == &init_mm))
		return;

	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
		   loaded_mm->context.ctx_id);

	if (this_cpu_read(cpu_tlbstate_shared.is_lazy)) {
		/*
		 * We're in lazy mode. We need to at least flush our
		 * paging-structure cache to avoid speculatively reading
		 * garbage into our TLB. Since switching to init_mm is barely
		 * slower than a minimal flush, just switch to init_mm.
		 *
		 * This should be rare, with native_flush_tlb_multi() skipping
		 * IPIs to lazy TLB mode CPUs.
		 */
		switch_mm_irqs_off(NULL, &init_mm, NULL);
		return;
	}

	if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID &&
		     f->new_tlb_gen <= local_tlb_gen)) {
		/*
		 * The TLB is already up to date with respect to
		 * f->new_tlb_gen. While the core might still be behind
		 * mm_tlb_gen, checking mm_tlb_gen unnecessarily would have
		 * negative caching effects, so avoid it.
		 */
		return;
	}

	/*
	 * Defer mm_tlb_gen reading as long as possible to avoid cache
	 * contention.
	 */
	mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);

	if (unlikely(local_tlb_gen == mm_tlb_gen)) {
		/*
		 * There's nothing to do: we're already up to date. This can
		 * happen if two concurrent flushes happen -- the first flush to
		 * be handled can catch us all the way up, leaving no work for
		 * the second flush.
		 */
		goto done;
	}

	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
	WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);

	/*
	 * If we get to this point, we know that our TLB is out of date.
	 * This does not strictly imply that we need to flush (it's
	 * possible that f->new_tlb_gen <= local_tlb_gen), but we're
	 * going to need to flush in the very near future, so we might
	 * as well get it over with.
	 *
	 * The only question is whether to do a full or partial flush.
	 *
	 * We do a partial flush if requested and two extra conditions
	 * are met:
	 *
	 * 1. f->new_tlb_gen == local_tlb_gen + 1. We have an invariant that
	 *    we've always done all needed flushes to catch up to
	 *    local_tlb_gen. If, for example, local_tlb_gen == 2 and
	 *    f->new_tlb_gen == 3, then we know that the flush needed to bring
	 *    us up to date for tlb_gen 3 is the partial flush we're
	 *    processing.
	 *
	 *    As an example of why this check is needed, suppose that there
	 *    are two concurrent flushes. The first is a full flush that
	 *    changes context.tlb_gen from 1 to 2. The second is a partial
	 *    flush that changes context.tlb_gen from 2 to 3. If they get
	 *    processed on this CPU in reverse order, we'll see
	 *    local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
	 *    If we were to use __flush_tlb_one_user() and set local_tlb_gen to
	 *    3, we'd break the invariant: we'd update local_tlb_gen above
	 *    1 without the full flush that's needed for tlb_gen 2.
	 *
	 * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimization.
	 *    Partial TLB flushes are not all that much cheaper than full TLB
	 *    flushes, so it seems unlikely that it would be a performance win
	 *    to do a partial flush if that won't bring our TLB fully up to
	 *    date. By doing a full flush instead, we can increase
	 *    local_tlb_gen all the way to mm_tlb_gen and we can probably
	 *    avoid another flush in the very near future.
	 */
	if (f->end != TLB_FLUSH_ALL &&
	    f->new_tlb_gen == local_tlb_gen + 1 &&
	    f->new_tlb_gen == mm_tlb_gen) {
		/* Partial flush */
		unsigned long addr = f->start;

		/* Partial flush cannot have invalid generations */
		VM_WARN_ON(f->new_tlb_gen == TLB_GENERATION_INVALID);

		/* Partial flush must have valid mm */
		VM_WARN_ON(f->mm == NULL);

		nr_invalidate = (f->end - f->start) >> f->stride_shift;

		while (addr < f->end) {
			flush_tlb_one_user(addr);
			addr += 1UL << f->stride_shift;
		}
		if (local)
			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
	} else {
		/* Full flush. */
		nr_invalidate = TLB_FLUSH_ALL;

		flush_tlb_local();
		if (local)
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	}

	/* Both paths above update our state to mm_tlb_gen. */
	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);

	/* Tracing is done in a unified manner to reduce the code size */
done:
	trace_tlb_flush(!local ? TLB_REMOTE_SHOOTDOWN :
				(f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN :
						  TLB_LOCAL_MM_SHOOTDOWN,
			nr_invalidate);
}
static bool tlb_is_not_lazy(int cpu, void *data)
{
	return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
}

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
EXPORT_PER_CPU_SYMBOL(cpu_tlbstate_shared);

STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
					const struct flush_tlb_info *info)
{
	/*
	 * Do accounting and tracing. Note that there are (and have always
	 * been) cases in which a remote TLB flush is traced but is never
	 * actually carried out.
	 */
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (info->end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(info->end - info->start) >> PAGE_SHIFT);

	/*
	 * If no page tables were freed, we can skip sending IPIs to
	 * CPUs in lazy TLB mode. They will flush the CPU themselves
	 * at the next context switch.
	 *
	 * However, if page tables are getting freed, we need to send the
	 * IPI everywhere, to prevent CPUs in lazy TLB mode from tripping
	 * up on the new contents of what used to be page tables, while
	 * doing a speculative memory access.
	 */
	if (info->freed_tables)
		on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
	else
		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
				      (void *)info, 1, cpumask);
}

void flush_tlb_multi(const struct cpumask *cpumask,
		     const struct flush_tlb_info *info)
{
	__flush_tlb_multi(cpumask, info);
}

/*
 * See Documentation/arch/x86/tlb.rst for details. We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead. Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);

#ifdef CONFIG_DEBUG_VM
static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
#endif
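/*
 * Illustrative only: the intended pairing around a flush, as used by
 * flush_tlb_mm_range() and arch_tlbbatch_flush() below (preemption is
 * already disabled by the callers, keeping the per-CPU slot stable):
 *
 *	info = get_flush_tlb_info(mm, start, end, stride_shift,
 *				  freed_tables, new_tlb_gen);
 *	flush_tlb_multi(mm_cpumask(mm), info);
 *	put_flush_tlb_info();
 */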
static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int stride_shift, bool freed_tables,
			u64 new_tlb_gen)
{
	struct flush_tlb_info *info = this_cpu_ptr(&flush_tlb_info);

#ifdef CONFIG_DEBUG_VM
	/*
	 * Ensure that the following code is non-reentrant and flush_tlb_info
	 * is not overwritten. This means no TLB flushing is initiated by
	 * interrupt handlers and machine-check exception handlers.
	 */
	BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
#endif

	info->start		= start;
	info->end		= end;
	info->mm		= mm;
	info->stride_shift	= stride_shift;
	info->freed_tables	= freed_tables;
	info->new_tlb_gen	= new_tlb_gen;
	info->initiating_cpu	= smp_processor_id();

	return info;
}

static void put_flush_tlb_info(void)
{
#ifdef CONFIG_DEBUG_VM
	/* Complete reentrancy prevention checks */
	barrier();
	this_cpu_dec(flush_tlb_info_idx);
#endif
}

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables)
{
	struct flush_tlb_info *info;
	u64 new_tlb_gen;
	int cpu;

	cpu = get_cpu();

	/* Should we flush just the requested range? */
	if ((end == TLB_FLUSH_ALL) ||
	    ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
		start = 0;
		end = TLB_FLUSH_ALL;
	}

	/* This is also a barrier that synchronizes with switch_mm(). */
	new_tlb_gen = inc_mm_tlb_gen(mm);

	info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
				  new_tlb_gen);

	/*
	 * flush_tlb_multi() is not optimized for the common case in which only
	 * a local TLB flush is needed. Optimize this use-case by calling
	 * flush_tlb_func() directly in this case.
	 */
	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
		flush_tlb_multi(mm_cpumask(mm), info);
	} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
		lockdep_assert_irqs_enabled();
		local_irq_disable();
		flush_tlb_func(info);
		local_irq_enable();
	}

	put_flush_tlb_info();
	put_cpu();
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}


static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* Flush the range one page at a time with 'invlpg' */
	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
		flush_tlb_one_kernel(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance as user space task's flush, a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info *info;

		preempt_disable();
		info = get_flush_tlb_info(NULL, start, end, 0, false,
					  TLB_GENERATION_INVALID);

		on_each_cpu(do_kernel_range_flush, info, 1);

		put_flush_tlb_info();
		preempt_enable();
	}
}
/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it. It needs to be used very carefully.
 */
unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 =
		build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
			  this_cpu_read(cpu_tlbstate.loaded_mm_asid),
			  tlbstate_lam_cr3_mask());

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}
EXPORT_SYMBOL_GPL(__get_current_cr3_fast);

/*
 * Flush one page in the kernel mapping
 */
void flush_tlb_one_kernel(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);

	/*
	 * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
	 * paravirt equivalent. Even with PCID, this is sufficient: we only
	 * use PCID if we also use global PTEs for the kernel mapping, and
	 * INVLPG flushes global translations across all address spaces.
	 *
	 * If PTI is on, then the kernel is mapped with non-global PTEs, and
	 * __flush_tlb_one_user() will flush the given address for the current
	 * kernel address space and for its usermode counterpart, but it does
	 * not flush it for other address spaces.
	 */
	flush_tlb_one_user(addr);

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * See above. We need to propagate the flush to all other address
	 * spaces. In principle, we only need to propagate it to kernelmode
	 * address spaces, but the extra bookkeeping we would need is not
	 * worth it.
	 */
	this_cpu_write(cpu_tlbstate.invalidate_other, true);
}

/*
 * Flush one page in the user mapping
 */
STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
{
	u32 loaded_mm_asid;
	bool cpu_pcide;

	/* Flush 'addr' from the kernel PCID: */
	invlpg(addr);

	/* If PTI is off there is no user PCID and nothing to flush. */
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	cpu_pcide      = this_cpu_read(cpu_tlbstate.cr4) & X86_CR4_PCIDE;

	/*
	 * invpcid_flush_one(pcid>0) will #GP if CR4.PCIDE==0. Check
	 * 'cpu_pcide' to ensure that *this* CPU will not trigger those
	 * #GP's even if called before CR4.PCIDE has been initialized.
	 */
	if (boot_cpu_has(X86_FEATURE_INVPCID) && cpu_pcide)
		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
	else
		invalidate_user_asid(loaded_mm_asid);
}

void flush_tlb_one_user(unsigned long addr)
{
	__flush_tlb_one_user(addr);
}
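/*
 * Illustrative only: the four INVPCID types defined by the Intel SDM
 * and their kernel helpers; this file uses types 0 and 2:
 *
 *	type 0: one address, one PCID      invpcid_flush_one()
 *	type 1: one PCID, non-global       invpcid_flush_single_context()
 *	type 2: everything, incl. globals  invpcid_flush_all()
 *	type 3: everything, non-global     invpcid_flush_all_nonglobals()
 */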
/*
 * Flush everything
 */
STATIC_NOPV void native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 *
		 * Note, this works with CR4.PCIDE=0 or 1.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4));

	raw_local_irq_restore(flags);
}

/*
 * Flush the entire current user mapping
 */
STATIC_NOPV void native_flush_tlb_local(void)
{
	/*
	 * Preemption or interrupts must be disabled to protect the access
	 * to the per CPU variable and to prevent being preempted between
	 * read_cr3() and write_cr3().
	 */
	WARN_ON_ONCE(preemptible());

	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
	native_write_cr3(__native_read_cr3());
}

void flush_tlb_local(void)
{
	__flush_tlb_local();
}

/*
 * Flush everything
 */
void __flush_tlb_all(void)
{
	/*
	 * This is to catch users who have preemption enabled and the PGE
	 * feature, and who would otherwise not trigger the warning in
	 * native_flush_tlb_local().
	 */
	VM_WARN_ON_ONCE(preemptible());

	if (cpu_feature_enabled(X86_FEATURE_PGE)) {
		__flush_tlb_global();
	} else {
		/*
		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
		 */
		flush_tlb_local();
	}
}
EXPORT_SYMBOL_GPL(__flush_tlb_all);

void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	struct flush_tlb_info *info;

	int cpu = get_cpu();

	info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false,
				  TLB_GENERATION_INVALID);
	/*
	 * flush_tlb_multi() is not optimized for the common case in which only
	 * a local TLB flush is needed. Optimize this use-case by calling
	 * flush_tlb_func() directly in this case.
	 */
	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
		flush_tlb_multi(&batch->cpumask, info);
	} else if (cpumask_test_cpu(cpu, &batch->cpumask)) {
		lockdep_assert_irqs_enabled();
		local_irq_disable();
		flush_tlb_func(info);
		local_irq_enable();
	}

	cpumask_clear(&batch->cpumask);

	put_flush_tlb_info();
	put_cpu();
}
/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or
 * switching the loaded mm. It can also be dangerous if we
 * interrupted some kernel code that was temporarily using a
 * different mm.
 */
bool nmi_uaccess_okay(void)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	struct mm_struct *current_mm = current->mm;

	VM_WARN_ON_ONCE(!loaded_mm);

	/*
	 * The condition we want to check is
	 * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
	 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
	 * is supposed to be reasonably fast.
	 *
	 * Instead, we check the almost equivalent but somewhat conservative
	 * condition below, and we rely on the fact that switch_mm_irqs_off()
	 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
	 */
	if (loaded_mm != current_mm)
		return false;

	VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));

	return true;
}

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
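/*
 * Illustrative only: the knob created above appears in debugfs and can
 * be read or tuned from userspace, e.g.:
 *
 *	# cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	33
 *	# echo 0 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *
 * Setting it to 0 makes every ranged flush fall back to a full flush.
 */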