// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <linux/acpi.h>
#include <linux/elf-randomize.h>
#include <linux/static_call.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <linux/entry-common.h>
#include <asm/cpu.h>
#include <asm/cpuid/api.h>
#include <asm/apic.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/api.h>
#include <asm/fpu/sched.h>
#include <asm/fpu/xstate.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
#include <asm/frame.h>
#include <asm/unwind.h>
#include <asm/tdx.h>
#include <asm/mmu_context.h>
#include <asm/msr.h>
#include <asm/shstk.h>

#include "process.h"

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level. Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 * Poison it.
		 */
		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,

#ifdef CONFIG_X86_32
		.sp1 = TOP_OF_INIT_STACK,

		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
#endif
		.io_bitmap_base	= IO_BITMAP_OFFSET_INVALID,
	},
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);

DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);

/*
 * The cache may be in an incoherent state and needs flushing during kexec.
 * E.g., on SME/TDX platforms, dirty cacheline aliases with and without
 * encryption bit(s) can coexist and the cache needs to be flushed before
 * booting to the new kernel to avoid the silent memory corruption due to
 * dirty cachelines with different encryption property being written back
 * to the memory.
 */
DEFINE_PER_CPU(bool, cache_state_incoherent);

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/* fpu_clone() will initialize the "dst_fpu" memory */
	memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(*dst), 0);

#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif

	return 0;
}

#ifdef CONFIG_X86_64
void arch_release_task_struct(struct task_struct *tsk)
{
	if (fpu_state_size_dynamic() && !(tsk->flags & (PF_KTHREAD | PF_USER_WORKER)))
		fpstate_free(x86_task_fpu(tsk));
}
#endif

/*
 * Free thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	if (test_thread_flag(TIF_IO_BITMAP))
		io_bitmap_exit(tsk);

	free_vm86(t);

	shstk_free(tsk);
	fpu__drop(tsk);
}

static int set_new_tls(struct task_struct *p, unsigned long tls)
{
	struct user_desc __user *utls = (struct user_desc __user *)tls;

	if (in_ia32_syscall())
		return do_set_thread_area(p, -1, utls, 0);
	else
		return do_set_thread_area_64(p, ARCH_SET_FS, tls);
}

__visible void ret_from_fork(struct task_struct *prev, struct pt_regs *regs,
			     int (*fn)(void *), void *fn_arg)
{
	schedule_tail(prev);

	/* Is this a kernel thread? */
	if (unlikely(fn)) {
		fn(fn_arg);
		/*
		 * A kernel thread is allowed to return here after successfully
		 * calling kernel_execve(). Exit to userspace to complete the
		 * execve() syscall.
		 */
		regs->ax = 0;
	}

	syscall_exit_to_user_mode(regs);
}

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	u64 clone_flags = args->flags;
	unsigned long sp = args->stack;
	unsigned long tls = args->tls;
	struct inactive_task_frame *frame;
	struct fork_frame *fork_frame;
	struct pt_regs *childregs;
	unsigned long new_ssp;
	int ret = 0;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	frame->bp = encode_frame_pointer(childregs);
	frame->ret_addr = (unsigned long) ret_from_fork_asm;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap = NULL;
	clear_tsk_thread_flag(p, TIF_IO_BITMAP);
	p->thread.iopl_warn = 0;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

#ifdef CONFIG_X86_64
	current_save_fsgs();
	p->thread.fsindex = current->thread.fsindex;
	p->thread.fsbase = current->thread.fsbase;
	p->thread.gsindex = current->thread.gsindex;
	p->thread.gsbase = current->thread.gsbase;

	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	if (p->mm && (clone_flags & (CLONE_VM | CLONE_VFORK)) == CLONE_VM)
		set_bit(MM_CONTEXT_LOCK_LAM, &p->mm->context.flags);
#else
	p->thread.sp0 = (unsigned long) (childregs + 1);
	savesegment(gs, p->thread.gs);
	/*
	 * Clear all status flags including IF and set fixed bit. 64bit
	 * does not have this initialization as the frame does not contain
	 * flags. The flags consistency (especially vs. AC) is there
	 * ensured via objtool, which lacks 32bit support.
	 */
	frame->flags = X86_EFLAGS_FIXED;
#endif

	/*
	 * Allocate a new shadow stack for thread if needed. If shadow stack
	 * is disabled, new_ssp will remain 0, and fpu_clone() will know not to
	 * update it.
	 */
	new_ssp = shstk_alloc_thread_stack(p, clone_flags, args->stack_size);
	if (IS_ERR_VALUE(new_ssp))
		return PTR_ERR((void *)new_ssp);

	fpu_clone(p, clone_flags, args->fn, new_ssp);

	/* Kernel thread? */
	if (unlikely(p->flags & PF_KTHREAD)) {
		p->thread.pkru = pkru_get_init_value();
		memset(childregs, 0, sizeof(struct pt_regs));
		kthread_frame_init(frame, args->fn, args->fn_arg);
		return 0;
	}

	/*
	 * Clone current's PKRU value from hardware. tsk->thread.pkru
	 * is only valid when scheduled out.
	 */
	p->thread.pkru = read_pkru();

	frame->bx = 0;
	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	if (unlikely(args->fn)) {
		/*
		 * A user space thread, but it doesn't return to
		 * ret_from_fork().
		 *
		 * In order to indicate that to tools like gdb,
		 * we reset the stack and instruction pointers.
		 *
		 * It does the same kernel frame setup to return to a kernel
		 * function that a kernel thread does.
		 */
		childregs->sp = 0;
		childregs->ip = 0;
		kthread_frame_init(frame, args->fn, args->fn_arg);
		return 0;
	}

	/* Set a new TLS for the child thread? */
	if (clone_flags & CLONE_SETTLS)
		ret = set_new_tls(p, tls);

	if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
		io_bitmap_share(p);

	return ret;
}

static void pkru_flush_thread(void)
{
	/*
	 * If PKRU is enabled the default PKRU value has to be loaded into
	 * the hardware right here (similar to context switch).
	 */
	pkru_write_default();
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu_flush_thread();
	pkru_flush_thread();
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
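
/*
 * get_tsc_mode()/set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC prctl()s.
 * Illustrative userspace sketch (not part of this file, error handling
 * omitted):
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	// RDTSC/RDTSCP now raise SIGSEGV
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE);	// RDTSC/RDTSCP are allowed again
 */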

DEFINE_PER_CPU(u64, msr_misc_features_shadow);

static void set_cpuid_faulting(bool on)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		u64 msrval;

		msrval = this_cpu_read(msr_misc_features_shadow);
		msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
		msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
		this_cpu_write(msr_misc_features_shadow, msrval);
		wrmsrq(MSR_MISC_FEATURES_ENABLES, msrval);
	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		if (on)
			msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_CPUID_USER_DIS_BIT);
		else
			msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_CPUID_USER_DIS_BIT);
	}
}

static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}

static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}

static int set_cpuid_mode(unsigned long cpuid_enabled)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}
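
/*
 * get_cpuid_mode()/set_cpuid_mode() back the ARCH_GET_CPUID/ARCH_SET_CPUID
 * arch_prctl()s handled at the bottom of this file. Illustrative userspace
 * sketch (not part of this file):
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_CPUID, 0);	// CPUID now raises SIGSEGV
 *	syscall(SYS_arch_prctl, ARCH_GET_CPUID);	// returns 0 while faulting
 *	syscall(SYS_arch_prctl, ARCH_SET_CPUID, 1);	// CPUID works again
 */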

/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();

	/*
	 * Don't inherit TIF_SSBD across exec boundary when
	 * PR_SPEC_DISABLE_NOEXEC is used.
	 */
	if (test_thread_flag(TIF_SSBD) &&
	    task_spec_ssb_noexec(current)) {
		clear_thread_flag(TIF_SSBD);
		task_clear_spec_ssb_disable(current);
		task_clear_spec_ssb_noexec(current);
		speculation_ctrl_update(read_thread_flags());
	}

	mm_reset_untag_mask(current->mm);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void switch_to_bitmap(unsigned long tifp)
{
	/*
	 * Invalidate I/O bitmap if the previous task used it. This prevents
	 * any possible leakage of an active I/O bitmap.
	 *
	 * If the next task has an I/O bitmap it will handle it on exit to
	 * user mode.
	 */
	if (tifp & _TIF_IO_BITMAP)
		tss_invalidate_io_bitmap();
}

static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
{
	/*
	 * Copy at least the byte range of the incoming task's bitmap which
	 * covers the permitted I/O ports.
	 *
	 * If the previous task which used an I/O bitmap had more bits
	 * permitted, then the copy needs to cover those as well so they
	 * get turned off.
	 */
	memcpy(tss->io_bitmap.bitmap, iobm->bitmap,
	       max(tss->io_bitmap.prev_max, iobm->max));

	/*
	 * Store the new max and the sequence number of this bitmap
	 * and a pointer to the bitmap itself.
	 */
	tss->io_bitmap.prev_max = iobm->max;
	tss->io_bitmap.prev_sequence = iobm->sequence;
}

/**
 * native_tss_update_io_bitmap - Update I/O bitmap before exiting to user mode
 */
void native_tss_update_io_bitmap(void)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
	struct thread_struct *t = &current->thread;
	u16 *base = &tss->x86_tss.io_bitmap_base;

	if (!test_thread_flag(TIF_IO_BITMAP)) {
		native_tss_invalidate_io_bitmap();
		return;
	}

	if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) {
		*base = IO_BITMAP_OFFSET_VALID_ALL;
	} else {
		struct io_bitmap *iobm = t->io_bitmap;

		if (WARN_ON_ONCE(!iobm)) {
			clear_thread_flag(TIF_IO_BITMAP);
			native_tss_invalidate_io_bitmap();
			return;
		}

		/*
		 * Only copy bitmap data when the sequence number differs. The
		 * update time is accounted to the incoming task.
		 */
		if (tss->io_bitmap.prev_sequence != iobm->sequence)
			tss_copy_io_bitmap(tss, iobm);

		/* Enable the bitmap */
		*base = IO_BITMAP_OFFSET_VALID_MAP;
	}

	/*
	 * Make sure that the TSS limit is covering the IO bitmap. It might have
	 * been cut down by a VMEXIT to 0x67 which would cause a subsequent I/O
	 * access from user space to trigger a #GP because the bitmap is outside
	 * the TSS limit.
	 */
	refresh_tss_limit();
}
#else /* CONFIG_X86_IOPL_IOPERM */
static inline void switch_to_bitmap(unsigned long tifp) { }
#endif
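
/*
 * The io_bitmap handling above services the ioperm()/iopl() syscalls; the
 * bitmap is only copied into the per-CPU TSS on return to user mode of a
 * task with TIF_IO_BITMAP set (see native_tss_update_io_bitmap() above).
 * Illustrative userspace sketch (not part of this file, needs
 * CAP_SYS_RAWIO):
 *
 *	ioperm(0x3f8, 8, 1);	// allow access to the 8 legacy COM1 UART ports
 *	outb('x', 0x3f8);	// port I/O no longer raises #GP/SIGSEGV
 */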

#ifdef CONFIG_SMP

struct ssb_state {
	struct ssb_state	*shared_state;
	raw_spinlock_t		lock;
	unsigned int		disable_state;
	unsigned long		local_state;
};

#define LSTATE_SSB	0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);

void speculative_store_bypass_ht_init(void)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	st->local_state = 0;

	/*
	 * Shared state setup happens once on the first bringup
	 * of the CPU. It's not destroyed on CPU hotunplug.
	 */
	if (st->shared_state)
		return;

	raw_spin_lock_init(&st->lock);

	/*
	 * Go over HT siblings and check whether one of them has set up the
	 * shared state pointer already.
	 */
	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu == this_cpu)
			continue;

		if (!per_cpu(ssb_state, cpu).shared_state)
			continue;

		/* Link it to the state of the sibling: */
		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
		return;
	}

	/*
	 * First HT sibling to come up on the core. Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */
	st->shared_state = st;
}

/*
 * Logic is: First HT sibling enables SSBD for both siblings in the core
 * and last sibling to disable it, disables it for the whole core. This is how
 * MSR_SPEC_CTRL works in "hardware":
 *
 * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	u64 msr = x86_amd_ls_cfg_base;

	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrq(MSR_AMD64_LS_CFG, msr);
		return;
	}

	if (tifn & _TIF_SSBD) {
		/*
		 * Since this can race with prctl(), block reentry on the
		 * same CPU.
		 */
		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
			return;

		msr |= x86_amd_ls_cfg_ssbd_mask;

		raw_spin_lock(&st->shared_state->lock);
		/* First sibling enables SSBD: */
		if (!st->shared_state->disable_state)
			wrmsrq(MSR_AMD64_LS_CFG, msr);
		st->shared_state->disable_state++;
		raw_spin_unlock(&st->shared_state->lock);
	} else {
		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
			return;

		raw_spin_lock(&st->shared_state->lock);
		st->shared_state->disable_state--;
		if (!st->shared_state->disable_state)
			wrmsrq(MSR_AMD64_LS_CFG, msr);
		raw_spin_unlock(&st->shared_state->lock);
	}
}
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrq(MSR_AMD64_LS_CFG, msr);
}
#endif

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
	/*
	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
	 * so ssbd_tif_to_spec_ctrl() just works.
	 */
	wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}

/*
 * Update the MSRs managing speculation control, during context switch.
 *
 * tifp: Previous task's thread flags
 * tifn: Next task's thread flags
 */
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
						      unsigned long tifn)
{
	unsigned long tif_diff = tifp ^ tifn;
	u64 msr = x86_spec_ctrl_base;
	bool updmsr = false;

	lockdep_assert_irqs_disabled();

	/* Handle change of TIF_SSBD depending on the mitigation method. */
	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_ssb_virt_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_core_ssb_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		updmsr |= !!(tif_diff & _TIF_SSBD);
		msr |= ssbd_tif_to_spec_ctrl(tifn);
	}

	/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
	if (IS_ENABLED(CONFIG_SMP) &&
	    static_branch_unlikely(&switch_to_cond_stibp)) {
		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
		msr |= stibp_tif_to_spec_ctrl(tifn);
	}

	if (updmsr)
		update_spec_ctrl_cond(msr);
}

static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
		if (task_spec_ssb_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SSBD);
		else
			clear_tsk_thread_flag(tsk, TIF_SSBD);

		if (task_spec_ib_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
		else
			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
	}
	/* Return the updated threadinfo flags */
	return read_task_thread_flags(tsk);
}

void speculation_ctrl_update(unsigned long tif)
{
	unsigned long flags;

	/* Forced update. Make sure all relevant TIF flags are different */
	local_irq_save(flags);
	__speculation_ctrl_update(~tif, tif);
	local_irq_restore(flags);
}

/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{
	preempt_disable();
	speculation_ctrl_update(speculation_ctrl_update_tif(current));
	preempt_enable();
}
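
/*
 * speculation_ctrl_update_current() is what makes a speculation prctl()
 * take effect on the CPU the caller is currently running on. Illustrative
 * userspace sketch (not part of this file; subject to the system-wide
 * mitigation mode selected at boot):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0);
 *
 * These set TIF_SSBD / TIF_SPEC_IB for the task, which
 * __speculation_ctrl_update() translates into SPEC_CTRL/LS_CFG MSR updates
 * on context switch.
 */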

static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	newval = cr4 ^ mask;
	if (newval != cr4) {
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
	unsigned long tifp, tifn;

	tifn = read_task_thread_flags(next_p);
	tifp = read_task_thread_flags(prev_p);

	switch_to_bitmap(tifp);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
		__speculation_ctrl_update(tifp, tifn);
	} else {
		speculation_ctrl_update_tif(prev_p);
		tifn = speculation_ctrl_update_tif(next_p);

		/* Enforce MSR update to ensure consistent state */
		__speculation_ctrl_update(~tifn, tifn);
	}
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * We use this if we don't have any better idle routine..
 */
void __cpuidle default_idle(void)
{
	raw_safe_halt();
	raw_local_irq_disable();
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif

DEFINE_STATIC_CALL_NULL(x86_idle, default_idle);

static bool x86_idle_set(void)
{
	return !!static_call_query(x86_idle);
}

#ifndef CONFIG_SMP
static inline void __noreturn play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
}

void __noreturn arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void __cpuidle arch_cpu_idle(void)
{
	static_call(x86_idle)();
}
EXPORT_SYMBOL_GPL(arch_cpu_idle);

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = x86_idle_set();

	static_call_update(x86_idle, default_idle);

	return ret;
}
#endif

struct cpumask cpus_stop_mask;

void __noreturn stop_this_cpu(void *dummy)
{
	struct cpuinfo_x86 *c = this_cpu_ptr(&cpu_info);
	unsigned int cpu = smp_processor_id();

	local_irq_disable();

	/*
	 * Remove this CPU from the online mask and disable it
	 * unconditionally. This might be redundant in case that the reboot
	 * vector was handled late and stop_other_cpus() sent an NMI.
	 *
	 * According to SDM and APM NMIs can be accepted even after soft
	 * disabling the local APIC.
	 */
	set_cpu_online(cpu, false);
	disable_local_APIC();
	mcheck_cpu_clear(c);

	if (this_cpu_read(cache_state_incoherent))
		wbinvd();

	/*
	 * This brings a cache line back and dirties it, but
	 * native_stop_other_cpus() will overwrite cpus_stop_mask after it
	 * observed that all CPUs reported stop. This write will invalidate
	 * the related cache line on this CPU.
	 */
	cpumask_clear_cpu(cpu, &cpus_stop_mask);

#ifdef CONFIG_SMP
	if (smp_ops.stop_this_cpu) {
		smp_ops.stop_this_cpu();
		BUG();
	}
#endif

	for (;;) {
		/*
		 * Use native_halt() so that memory contents don't change
		 * (stack usage and variables) after possibly issuing the
		 * wbinvd() above.
		 */
		native_halt();
	}
}

/*
 * Prefer MWAIT over HALT if MWAIT is supported, MWAIT_CPUID leaf
 * exists and whenever MONITOR/MWAIT extensions are present there is at
 * least one C1 substate.
 *
 * Do not prefer MWAIT if the MONITOR instruction has a bug or idle=nomwait
 * is passed on the kernel command line.
 */
static __init bool prefer_mwait_c1_over_halt(void)
{
	const struct cpuinfo_x86 *c = &boot_cpu_data;
	u32 eax, ebx, ecx, edx;

	/* If override is enforced on the command line, fall back to HALT. */
	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
		return false;

	/* MWAIT is not supported on this platform. Fallback to HALT */
	if (!cpu_has(c, X86_FEATURE_MWAIT))
		return false;

	/* Monitor has a bug or APIC stops in C1E. Fallback to HALT */
	if (boot_cpu_has_bug(X86_BUG_MONITOR) || boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
		return false;

	cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &edx);

	/*
	 * If MWAIT extensions are not available, it is safe to use MWAIT
	 * with EAX=0, ECX=0.
	 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED))
		return true;

	/*
	 * If MWAIT extensions are available, there should be at least one
	 * MWAIT C1 substate present.
	 */
	return !!(edx & MWAIT_C1_SUBSTATE_MASK);
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (need_resched())
		return;

	x86_idle_clear_cpu_buffers();

	if (!current_set_polling_and_test()) {
		const void *addr = &current_thread_info()->flags;

		alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR,
				  [addr] "a" (addr));
		__monitor(addr, 0, 0);
		if (need_resched())
			goto out;

		__sti_mwait(0, 0);
		raw_local_irq_disable();
	}

out:
	__current_clr_polling();
}

void __init select_idle_routine(void)
{
	if (boot_option_idle_override == IDLE_POLL) {
		if (IS_ENABLED(CONFIG_SMP) && __max_threads_per_core > 1)
			pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
		return;
	}

	/* Required to guard against xen_set_default_idle() */
	if (x86_idle_set())
		return;

	if (prefer_mwait_c1_over_halt()) {
		pr_info("using mwait in idle threads\n");
		static_call_update(x86_idle, mwait_idle);
	} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
		pr_info("using TDX aware idle routine\n");
		static_call_update(x86_idle, tdx_halt);
	} else {
		static_call_update(x86_idle, default_idle);
	}
}

void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}

void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");

	if (IS_ENABLED(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE))
		static_branch_enable(&arch_needs_tick_broadcast);
	pr_info("System has AMD C1E erratum E400. Workaround enabled.\n");
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/* 'idle=halt' HALT for idle. C-states are disabled. */
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/* 'idle=nomwait' disables MWAIT for idle */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else {
		return -EINVAL;
	}

	return 0;
}
early_param("idle", idle_setup);

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(8192);
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	if (mmap_is_ia32())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long __get_wchan(struct task_struct *p)
{
	struct unwind_state state;
	unsigned long addr = 0;

	if (!try_get_task_stack(p))
		return 0;

	for (unwind_start(&state, p, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr)
			break;
		if (in_sched_functions(addr))
			continue;
		break;
	}

	put_task_stack(p);

	return addr;
}
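
/*
 * Besides the CPUID faulting controls above, arch_prctl() below also
 * multiplexes the dynamic XSTATE permission requests handled by
 * fpu_xstate_prctl(). Illustrative userspace sketch (not part of this
 * file), e.g. requesting AMX tile state before first use:
 *
 *	syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA);
 */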

SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(arg2);
	case ARCH_GET_XCOMP_SUPP:
	case ARCH_GET_XCOMP_PERM:
	case ARCH_REQ_XCOMP_PERM:
	case ARCH_GET_XCOMP_GUEST_PERM:
	case ARCH_REQ_XCOMP_GUEST_PERM:
		return fpu_xstate_prctl(option, arg2);
	}

	if (!in_ia32_syscall())
		return do_arch_prctl_64(current, option, arg2);

	return -EINVAL;
}

SYSCALL_DEFINE0(ni_syscall)
{
	return -ENOSYS;
}