/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_atpic.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_isa.h"
#include "opt_kdb.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_platform.h"
#include "opt_sched.h"
#ifdef __i386__
#include "opt_apic.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/domainset.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/tss.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef CPU_ELAN
#include <machine/elan_mmcr.h>
#endif
#include <x86/acpica_machdep.h>
#include <x86/ifunc.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#include <isa/isareg.h>

#include <contrib/dev/acpica/include/acpi.h>

#define	STATE_RUNNING	0x0
#define	STATE_MWAIT	0x1
#define	STATE_SLEEPING	0x2

#ifdef SMP
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
#endif

char bootmethod[16];
SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
    "System firmware boot method");

struct msr_op_arg {
	u_int msr;
	int op;
	uint64_t arg1;
	uint64_t *res;
};

static void
x86_msr_op_one(void *argp)
{
	struct msr_op_arg *a;
	uint64_t v;

	a = argp;
	switch (a->op) {
	case MSR_OP_ANDNOT:
		v = rdmsr(a->msr);
		v &= ~a->arg1;
		wrmsr(a->msr, v);
		break;
	case MSR_OP_OR:
		v = rdmsr(a->msr);
		v |= a->arg1;
		wrmsr(a->msr, v);
		break;
	case MSR_OP_WRITE:
		wrmsr(a->msr, a->arg1);
		break;
	case MSR_OP_READ:
		v = rdmsr(a->msr);
		*a->res = v;
		break;
	}
}

#define	MSR_OP_EXMODE_MASK	0xf0000000
#define	MSR_OP_OP_MASK		0x000000ff
#define	MSR_OP_GET_CPUID(x)	(((x) & ~MSR_OP_EXMODE_MASK) >> 8)
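/*
 * Illustrative usage sketch (mirroring hw_ibrs_recalculate() later in
 * this file): setting the IBRS bit in MSR_IA32_SPEC_CTRL on every CPU
 * via an SMP rendezvous looks like
 *
 *	x86_msr_op(MSR_IA32_SPEC_CTRL,
 *	    MSR_OP_RENDEZVOUS_ALL | MSR_OP_OR, IA32_SPEC_CTRL_IBRS, NULL);
 *
 * The op word packs the operation into its low byte, the target CPU id
 * for the *_ONE execution modes into the middle bits (extracted by
 * MSR_OP_GET_CPUID()), and the execution mode into the top nibble.
 */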
void
x86_msr_op(u_int msr, u_int op, uint64_t arg1, uint64_t *res)
{
	struct thread *td;
	struct msr_op_arg a;
	cpuset_t set;
	u_int exmode;
	int bound_cpu, cpu, i, is_bound;

	a.op = op & MSR_OP_OP_MASK;
	MPASS(a.op == MSR_OP_ANDNOT || a.op == MSR_OP_OR ||
	    a.op == MSR_OP_WRITE || a.op == MSR_OP_READ);
	exmode = op & MSR_OP_EXMODE_MASK;
	MPASS(exmode == MSR_OP_LOCAL || exmode == MSR_OP_SCHED_ALL ||
	    exmode == MSR_OP_SCHED_ONE || exmode == MSR_OP_RENDEZVOUS_ALL ||
	    exmode == MSR_OP_RENDEZVOUS_ONE);
	a.msr = msr;
	a.arg1 = arg1;
	a.res = res;
	switch (exmode) {
	case MSR_OP_LOCAL:
		x86_msr_op_one(&a);
		break;
	case MSR_OP_SCHED_ALL:
		td = curthread;
		thread_lock(td);
		is_bound = sched_is_bound(td);
		bound_cpu = td->td_oncpu;
		CPU_FOREACH(i) {
			sched_bind(td, i);
			x86_msr_op_one(&a);
		}
		if (is_bound)
			sched_bind(td, bound_cpu);
		else
			sched_unbind(td);
		thread_unlock(td);
		break;
	case MSR_OP_SCHED_ONE:
		td = curthread;
		cpu = MSR_OP_GET_CPUID(op);
		thread_lock(td);
		is_bound = sched_is_bound(td);
		bound_cpu = td->td_oncpu;
		if (!is_bound || bound_cpu != cpu)
			sched_bind(td, cpu);
		x86_msr_op_one(&a);
		if (is_bound) {
			if (bound_cpu != cpu)
				sched_bind(td, bound_cpu);
		} else {
			sched_unbind(td);
		}
		thread_unlock(td);
		break;
	case MSR_OP_RENDEZVOUS_ALL:
		smp_rendezvous(smp_no_rendezvous_barrier, x86_msr_op_one,
		    smp_no_rendezvous_barrier, &a);
		break;
	case MSR_OP_RENDEZVOUS_ONE:
		cpu = MSR_OP_GET_CPUID(op);
		CPU_SETOF(cpu, &set);
		smp_rendezvous_cpus(set, smp_no_rendezvous_barrier,
		    x86_msr_op_one, smp_no_rendezvous_barrier, &a);
		break;
	}
}

/*
 * Automatically initialized per-CPU errata in cpu_idle_tun() below.
 */
bool mwait_cpustop_broken = false;
SYSCTL_BOOL(_machdep, OID_AUTO, mwait_cpustop_broken, CTLFLAG_RDTUN,
    &mwait_cpustop_broken, 0,
    "Cannot reliably wake MONITOR/MWAIT CPUs without interrupts");

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable */
}

void
acpi_cpu_c1(void)
{

	__asm __volatile("sti; hlt");
}

/*
 * Use mwait to pause execution while waiting for an interrupt or
 * another thread to signal that there is more work.
 *
 * NOTE: Interrupts will cause a wakeup; however, this function does
 * not enable interrupt handling.  The caller is responsible for
 * enabling interrupts.
 */
void
acpi_cpu_idle_mwait(uint32_t mwait_hint)
{
	int *state;
	uint64_t v;

	/*
	 * A comment in a Linux patch claims that 'CPUs run faster with
	 * speculation protection disabled.  All CPU threads in a core
	 * must disable speculation protection for it to be
	 * disabled.  Disable it while we are idle so the other
	 * hyperthread can run fast.'
	 *
	 * XXXKIB.  Software coordination mode should be supported,
	 * but all Intel CPUs provide hardware coordination.
	 */

	state = &PCPU_PTR(monitorbuf)->idle_state;
	KASSERT(atomic_load_int(state) == STATE_SLEEPING,
	    ("cpu_mwait_cx: wrong monitorbuf state"));
	atomic_store_int(state, STATE_MWAIT);
	if (PCPU_GET(ibpb_set) || hw_ssb_active) {
		v = rdmsr(MSR_IA32_SPEC_CTRL);
		wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
		    IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
	} else {
		v = 0;
	}
	cpu_monitor(state, 0, 0);
	if (atomic_load_int(state) == STATE_MWAIT)
		cpu_mwait(MWAIT_INTRBREAK, mwait_hint);

	/*
	 * SSB cannot be disabled while we sleep, or rather, if it was
	 * disabled, the sysctl thread will bind to our CPU to tweak
	 * the MSR.
	 */
	if (v != 0)
		wrmsr(MSR_IA32_SPEC_CTRL, v);

	/*
	 * We should exit on any event that interrupts mwait, because
	 * that event might be a wanted interrupt.
	 */
	atomic_store_int(state, STATE_RUNNING);
}
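/*
 * A worked example of the estimation below, under the assumption that
 * DELAY(1000) waits close to 1 ms: the TSC delta over that window,
 * multiplied by 1000, approximates ticks per second.  When the TSC is
 * invariant, the APERF/MPERF ratio rescales the nominal delta to the
 * frequency the core actually ran at:
 *
 *	rate = (tsc2 - tsc1) * (1000 * acnt / mcnt)
 */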
/*
 * Get the current clock frequency for the given CPU id.
 */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	uint64_t tsc1, tsc2;
	uint64_t acnt, mcnt, perf;
	register_t reg;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
#ifdef __i386__
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);
#endif

	/*
	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
	 * DELAY(9)-based logic fails.
	 */
	if (tsc_is_invariant && !tsc_perf_stat)
		return (EOPNOTSUPP);

#ifdef SMP
	if (smp_cpus > 1) {
		/* Schedule ourselves on the indicated cpu. */
		thread_lock(curthread);
		sched_bind(curthread, cpu_id);
		thread_unlock(curthread);
	}
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	if (tsc_is_invariant) {
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		tsc1 = rdtsc();
		DELAY(1000);
		mcnt = rdmsr(MSR_MPERF);
		acnt = rdmsr(MSR_APERF);
		tsc2 = rdtsc();
		intr_restore(reg);
		perf = 1000 * acnt / mcnt;
		*rate = (tsc2 - tsc1) * perf;
	} else {
		tsc1 = rdtsc();
		DELAY(1000);
		tsc2 = rdtsc();
		intr_restore(reg);
		*rate = (tsc2 - tsc1) * 1000;
	}

#ifdef SMP
	if (smp_cpus > 1) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}
#endif

	return (0);
}

/*
 * Shut down the CPU as much as possible.
 */
void
cpu_halt(void)
{
	for (;;)
		halt();
}

static void
cpu_reset_real(void)
{
	struct region_descriptor null_idt;
	int b;

	disable_intr();
#ifdef CPU_ELAN
	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;
#endif
#ifdef __i386__
	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);
		outl(0xcfc, 0xf);
	}
#endif
#if !defined(BROKEN_KEYBOARD_RESET)
	/*
	 * Attempt to do a CPU reset via the keyboard controller, but
	 * do not turn off GateA20, as any machine that fails to do
	 * the reset here would then end up in no man's land.
	 */
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
#endif

	/*
	 * Attempt to force a reset via the Reset Control register at
	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
	 * transitions from 0 to 1.  Bit 1 selects the type of reset
	 * to attempt: 0 selects a "soft" reset, and 1 selects a
	 * "hard" reset.  We try a "hard" reset.  The first write sets
	 * bit 1 to select a "hard" reset and clears bit 2.  The
	 * second write forces a 0 -> 1 transition in bit 2 to trigger
	 * a reset.
	 */
	outb(0xcf9, 0x2);
	outb(0xcf9, 0x6);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */

	/*
	 * Attempt to force a reset via the Fast A20 and Init register
	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
	 * preserve bit 1 while setting bit 0.  We also must clear bit
	 * 0 before setting it if it isn't already clear.
	 */
	b = inb(0x92);
	if (b != 0xff) {
		if ((b & 0x1) != 0)
			outb(0x92, b & 0xfe);
		outb(0x92, b | 0x1);
		DELAY(500000);	/* wait 0.5 sec to see if that did it */
	}

	printf("No known reset method worked, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
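	/*
	 * As a last resort, load an empty IDT and trigger an exception;
	 * with no handler available to dispatch it, the fault escalates
	 * to a triple fault, which resets the processor.
	 */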
	/* Wipe the IDT. */
	null_idt.rd_limit = 0;
	null_idt.rd_base = 0;
	lidt(&null_idt);

	/* "good night, sweet prince .... <THUNK!>" */
	breakpoint();

	/* NOTREACHED */
	while (1)
		;
}

#ifdef SMP
static void
cpu_reset_proxy(void)
{

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		ia32_pause();	/* Wait for other cpu to see that we've started */

	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif

void
cpu_reset(void)
{
#ifdef SMP
	struct monitorbuf *mb;
	cpuset_t map;
	u_int cnt;

	if (smp_started) {
		map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &map);
		CPU_ANDNOT(&map, &map, &stopped_cpus);
		if (!CPU_EMPTY(&map)) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);
		}

		if (PCPU_GET(cpuid) != 0) {
			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");

			/* Restart CPU #0. */
			CPU_SETOF(0, &started_cpus);
			mb = &pcpu_find(0)->pc_monitorbuf;
			atomic_store_int(&mb->stop_state,
			    MONITOR_STOPSTATE_RUNNING);

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
				ia32_pause();
				cnt++;	/* Wait for BSP to announce restart */
			}
			if (cpu_reset_proxy_active == 0) {
				printf("cpu_reset: Failed to restart BSP\n");
			} else {
				cpu_reset_proxy_active = 2;
				while (1)
					ia32_pause();
				/* NOTREACHED */
			}
		}

		DELAY(1000000);
	}
#endif
	cpu_reset_real();
	/* NOTREACHED */
}

bool
cpu_mwait_usable(void)
{

	return ((cpu_feature2 & CPUID2_MON) != 0 && ((cpu_mon_mwait_flags &
	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)) ==
	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)));
}

void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */

int cpu_amdc1e_bug = 0;		/* AMD C1E APIC workaround required. */

static int idle_mwait = 1;	/* Use MONITOR/MWAIT for short idle. */
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");
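/*
 * Idle entry/exit protocol sketch: cpu_idle_enter() publishes the
 * per-CPU idle state word (STATE_SLEEPING or STATE_MWAIT) and then
 * re-checks for runnable threads with interrupts disabled, while
 * cpu_idle_wakeup() on a remote CPU reads that word to decide whether
 * a store to the monitored line suffices or an IPI is required.
 */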
static bool
cpu_idle_enter(int *statep, int newstate)
{
	KASSERT(atomic_load_int(statep) == STATE_RUNNING,
	    ("%s: state %d", __func__, atomic_load_int(statep)));

	/*
	 * A fence is needed to prevent reordering of the load in
	 * sched_runnable() with this store to the idle state word.  Without
	 * it, cpu_idle_wakeup() can observe the state as STATE_RUNNING after
	 * having added load to the queue, and elide an IPI.  Then,
	 * sched_runnable() can observe tdq_load == 0, so the CPU ends up
	 * idling with pending work.  tdq_notify() similarly ensures that a
	 * prior update to tdq_load is visible before calling
	 * cpu_idle_wakeup().
	 */
	atomic_store_int(statep, newstate);
#if defined(SCHED_ULE) && defined(SMP)
	atomic_thread_fence_seq_cst();
#endif

	/*
	 * Since we may be in a critical section from cpu_idle(), if
	 * an interrupt fires during that critical section we may have
	 * a pending preemption.  If the CPU halts, then that thread
	 * may not execute until a later interrupt awakens the CPU.
	 * To handle this race, check for a runnable thread after
	 * disabling interrupts and immediately return if one is
	 * found.  Also, we must absolutely guarantee that hlt is
	 * the next instruction after sti.  This ensures that any
	 * interrupt that fires after the call to disable_intr() will
	 * immediately awaken the CPU from hlt.  Finally, note that
	 * this works on x86 because interrupts are enabled only after
	 * the instruction following sti executes, while IF is set to 1
	 * immediately, allowing the hlt instruction to acknowledge the
	 * interrupt.
	 */
	disable_intr();
	if (sched_runnable()) {
		enable_intr();
		atomic_store_int(statep, STATE_RUNNING);
		return (false);
	} else {
		return (true);
	}
}

static void
cpu_idle_exit(int *statep)
{
	atomic_store_int(statep, STATE_RUNNING);
}

static void
cpu_idle_acpi(sbintime_t sbt)
{
	int *state;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	if (cpu_idle_enter(state, STATE_SLEEPING)) {
		if (cpu_idle_hook)
			cpu_idle_hook(sbt);
		else
			acpi_cpu_c1();
		cpu_idle_exit(state);
	}
}

static void
cpu_idle_hlt(sbintime_t sbt)
{
	int *state;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	if (cpu_idle_enter(state, STATE_SLEEPING)) {
		acpi_cpu_c1();
		atomic_store_int(state, STATE_RUNNING);
	}
}

static void
cpu_idle_mwait(sbintime_t sbt)
{
	int *state;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	if (cpu_idle_enter(state, STATE_MWAIT)) {
		cpu_monitor(state, 0, 0);
		if (atomic_load_int(state) == STATE_MWAIT)
			__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
		else
			enable_intr();
		cpu_idle_exit(state);
	}
}

static void
cpu_idle_spin(sbintime_t sbt)
{
	int *state;
	int i;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	atomic_store_int(state, STATE_RUNNING);

	/*
	 * The sched_runnable() call is racy, but as long as we are in
	 * a loop, missing it once will have little impact, if any
	 * (and it is much better than not checking at all).
	 */
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		cpu_spinwait();
	}
}

void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;

void
cpu_idle(int busy)
{
	uint64_t msr;
	sbintime_t sbt = -1;

	CTR1(KTR_SPARE2, "cpu_idle(%d)", busy);
#ifdef MP_WATCHDOG
	ap_watchdog(PCPU_GET(cpuid));
#endif

	/* If we are busy, try to use fast methods. */
	if (busy) {
		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
			cpu_idle_mwait(busy);
			goto out;
		}
	}

	/* If we have time, switch timers into idle mode. */
	if (!busy) {
		critical_enter();
		sbt = cpu_idleclock();
	}

	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_amdc1e_bug && cpu_disable_c3_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if ((msr & (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)) != 0)
			wrmsr(MSR_AMDK8_IPM, msr & ~(AMDK8_SMIONCMPHALT |
			    AMDK8_C1EONCMPHALT));
	}

	/* Call main idle method. */
	cpu_idle_fn(sbt);
	/* Switch timers back into active mode. */
	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
out:
	CTR1(KTR_SPARE2, "cpu_idle(%d) done", busy);
}

static int cpu_idle_apl31_workaround;
SYSCTL_INT(_machdep, OID_AUTO, idle_apl31, CTLFLAG_RW,
    &cpu_idle_apl31_workaround, 0,
    "Apollo Lake APL31 MWAIT bug workaround");

int
cpu_idle_wakeup(int cpu)
{
	struct monitorbuf *mb;
	int *state;

	mb = &pcpu_find(cpu)->pc_monitorbuf;
	state = &mb->idle_state;
	switch (atomic_load_int(state)) {
	case STATE_SLEEPING:
		return (0);
	case STATE_MWAIT:
		atomic_store_int(state, STATE_RUNNING);
		return (cpu_idle_apl31_workaround ? 0 : 1);
	case STATE_RUNNING:
		return (1);
	default:
		panic("bad monitor state");
		return (1);
	}
}

/*
 * Ordered by speed/power consumption.
 */
static struct {
	void	*id_fn;
	char	*id_name;
	int	id_cpuid2_flag;
} idle_tbl[] = {
	{ .id_fn = cpu_idle_spin, .id_name = "spin" },
	{ .id_fn = cpu_idle_mwait, .id_name = "mwait",
	    .id_cpuid2_flag = CPUID2_MON },
	{ .id_fn = cpu_idle_hlt, .id_name = "hlt" },
	{ .id_fn = cpu_idle_acpi, .id_name = "acpi" },
};

static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_cpuid2_flag != 0 &&
		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle_available,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, idle_sysctl_available, "A",
    "list of available idle functions");

static bool
cpu_idle_selector(const char *new_idle_name)
{
	int i;

	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_cpuid2_flag != 0 &&
		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		if (strcmp(idle_tbl[i].id_name, new_idle_name))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		if (bootverbose)
			printf("CPU idle set to %s\n", idle_tbl[i].id_name);
		return (true);
	}
	return (false);
}
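/*
 * Example (illustrative): the active idle method can be inspected and
 * changed at runtime via the machdep.idle sysctl backed by the handler
 * below, e.g. "sysctl machdep.idle=hlt", and preselected at boot via
 * the machdep.idle loader tunable consumed by cpu_idle_tun().
 */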
static int
cpu_idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16], *p;
	int error, i;

	p = "unknown";
	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (cpu_idle_selector(buf) ? 0 : EINVAL);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, cpu_idle_sysctl, "A",
    "currently selected idle function");

static void
cpu_idle_tun(void *unused __unused)
{
	char tunvar[16];

	if (TUNABLE_STR_FETCH("machdep.idle", tunvar, sizeof(tunvar)))
		cpu_idle_selector(tunvar);
	else if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1) {
		/* Ryzen errata 1057, 1109. */
		cpu_idle_selector("hlt");
		idle_mwait = 0;
		mwait_cpustop_broken = true;
	}

	if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_id == 0x506c9) {
		/*
		 * Apollo Lake errata APL31 (public errata APL30).
		 * Stores to the armed address range may not trigger
		 * MWAIT to resume execution.  OS needs to use
		 * interrupts to wake processors from MWAIT-induced
		 * sleep states.
		 */
		cpu_idle_apl31_workaround = 1;
		mwait_cpustop_broken = true;
	}
	TUNABLE_INT_FETCH("machdep.idle_apl31", &cpu_idle_apl31_workaround);
}
SYSINIT(cpu_idle_tun, SI_SUB_CPU, SI_ORDER_MIDDLE, cpu_idle_tun, NULL);

static int panic_on_nmi = 0xff;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,
    &panic_on_nmi, 0,
    "Panic on NMI: 1 = H/W failure; 2 = unknown; 0xff = all");
int nmi_is_broadcast = 1;
SYSCTL_INT(_machdep, OID_AUTO, nmi_is_broadcast, CTLFLAG_RWTUN,
    &nmi_is_broadcast, 0,
    "Chipset NMI is broadcast");
int (*apei_nmi)(void);
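/*
 * NMI dispatch overview: nmi_handle_intr() below either broadcasts to
 * all CPUs or routes to this handler directly, depending on
 * nmi_is_broadcast.  panic_on_nmi is treated roughly as a bitmask:
 * bit 0 panics on NMIs that indicate hardware failure, bit 1 enters
 * the debugger (when available) on unknown NMIs, and any nonzero value
 * panics on unclaimed NMIs; the default 0xff enables everything.
 */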
void
nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame)
{
	bool claimed = false;

#ifdef DEV_ISA
	/* machine/parity/power fail/"kitchen sink" faults */
	if (isa_nmi(frame->tf_err)) {
		claimed = true;
		if ((panic_on_nmi & 1) != 0)
			panic("NMI indicates hardware failure");
	}
#endif /* DEV_ISA */

	/* ACPI Platform Error Interfaces callback. */
	if (apei_nmi != NULL && (*apei_nmi)())
		claimed = true;

	/*
	 * NMIs can be useful for debugging.  They can be hooked up to a
	 * pushbutton, usually on an ISA, PCI, or PCIe card.  They can also be
	 * generated by an IPMI BMC, either manually or in response to a
	 * watchdog timeout.  For example, see the "power diag" command in
	 * ports/sysutils/ipmitool.  They can also be generated by a
	 * hypervisor; see "bhyvectl --inject-nmi".
	 */

#ifdef KDB
	if (!claimed && (panic_on_nmi & 2) != 0) {
		if (debugger_on_panic) {
			printf("NMI/cpu%d ... going to debugger\n", cpu);
			claimed = kdb_trap(type, 0, frame);
		}
	}
#endif /* KDB */

	if (!claimed && panic_on_nmi != 0)
		panic("NMI");
}

void
nmi_handle_intr(u_int type, struct trapframe *frame)
{

#ifdef SMP
	if (nmi_is_broadcast) {
		nmi_call_kdb_smp(type, frame);
		return;
	}
#endif
	nmi_call_kdb(PCPU_GET(cpuid), type, frame);
}

static int hw_ibrs_active;
int hw_ibrs_ibpb_active;
int hw_ibrs_disable = 1;

SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
    "Indirect Branch Restricted Speculation active");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ibrs,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Indirect Branch Restricted Speculation active");

SYSCTL_INT(_machdep_mitigations_ibrs, OID_AUTO, active, CTLFLAG_RD,
    &hw_ibrs_active, 0, "Indirect Branch Restricted Speculation active");

void
hw_ibrs_recalculate(bool for_all_cpus)
{
	if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
		x86_msr_op(MSR_IA32_SPEC_CTRL, (for_all_cpus ?
		    MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL) |
		    (hw_ibrs_disable != 0 ? MSR_OP_ANDNOT : MSR_OP_OR),
		    IA32_SPEC_CTRL_IBRS, NULL);
		hw_ibrs_active = hw_ibrs_disable == 0;
		hw_ibrs_ibpb_active = 0;
	} else {
		hw_ibrs_active = hw_ibrs_ibpb_active = (cpu_stdext_feature3 &
		    CPUID_STDEXT3_IBPB) != 0 && !hw_ibrs_disable;
	}
}

static int
hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = hw_ibrs_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	hw_ibrs_disable = val != 0;
	hw_ibrs_recalculate(true);
	return (0);
}
SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
    "Disable Indirect Branch Restricted Speculation");

SYSCTL_PROC(_machdep_mitigations_ibrs, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ibrs_disable_handler, "I",
    "Disable Indirect Branch Restricted Speculation");

int hw_ssb_active;
int hw_ssb_disable;

SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
    &hw_ssb_active, 0,
    "Speculative Store Bypass Disable active");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ssb,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Speculative Store Bypass Disable active");

SYSCTL_INT(_machdep_mitigations_ssb, OID_AUTO, active, CTLFLAG_RD,
    &hw_ssb_active, 0, "Speculative Store Bypass Disable active");

static void
hw_ssb_set(bool enable, bool for_all_cpus)
{

	if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
		hw_ssb_active = 0;
		return;
	}
	hw_ssb_active = enable;
	x86_msr_op(MSR_IA32_SPEC_CTRL,
	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
	    (for_all_cpus ? MSR_OP_SCHED_ALL : MSR_OP_LOCAL),
	    IA32_SPEC_CTRL_SSBD, NULL);
}
void
hw_ssb_recalculate(bool all_cpus)
{

	switch (hw_ssb_disable) {
	default:
		hw_ssb_disable = 0;
		/* FALLTHROUGH */
	case 0: /* off */
		hw_ssb_set(false, all_cpus);
		break;
	case 1: /* on */
		hw_ssb_set(true, all_cpus);
		break;
	case 2: /* auto */
		hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSB_NO) != 0 ?
		    false : true, all_cpus);
		break;
	}
}

static int
hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = hw_ssb_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	hw_ssb_disable = val;
	hw_ssb_recalculate(true);
	return (0);
}
SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ssb_disable_handler, "I",
    "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");

SYSCTL_PROC(_machdep_mitigations_ssb, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ssb_disable_handler, "I",
    "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");

int hw_mds_disable;

/*
 * Handler for Microarchitectural Data Sampling issues.  Really not a
 * pointer to a C function: on amd64 the code must not change any CPU
 * architectural state except possibly %rflags.  Also, it is always
 * called with interrupts disabled.
 */
void mds_handler_void(void);
void mds_handler_verw(void);
void mds_handler_ivb(void);
void mds_handler_bdw(void);
void mds_handler_skl_sse(void);
void mds_handler_skl_avx(void);
void mds_handler_skl_avx512(void);
void mds_handler_silvermont(void);
void (*mds_handler)(void) = mds_handler_void;

static int
sysctl_hw_mds_disable_state_handler(SYSCTL_HANDLER_ARGS)
{
	const char *state;

	if (mds_handler == mds_handler_void)
		state = "inactive";
	else if (mds_handler == mds_handler_verw)
		state = "VERW";
	else if (mds_handler == mds_handler_ivb)
		state = "software IvyBridge";
	else if (mds_handler == mds_handler_bdw)
		state = "software Broadwell";
	else if (mds_handler == mds_handler_skl_sse)
		state = "software Skylake SSE";
	else if (mds_handler == mds_handler_skl_avx)
		state = "software Skylake AVX";
	else if (mds_handler == mds_handler_skl_avx512)
		state = "software Skylake AVX512";
	else if (mds_handler == mds_handler_silvermont)
		state = "software Silvermont";
	else
		state = "unknown";
	return (SYSCTL_OUT(req, state, strlen(state)));
}

SYSCTL_PROC(_hw, OID_AUTO, mds_disable_state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_hw_mds_disable_state_handler, "A",
    "Microarchitectural Data Sampling Mitigation state");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, mds,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Microarchitectural Data Sampling Mitigation state");

SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_hw_mds_disable_state_handler, "A",
    "Microarchitectural Data Sampling Mitigation state");

_Static_assert(__offsetof(struct pcpu, pc_mds_tmp) % 64 == 0, "MDS AVX512");
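/*
 * The 64-byte alignment asserted above matches the width of a ZMM
 * register; the Skylake AVX512 variant of the handler presumably uses
 * pc_mds_tmp as scratch storage for a full vector load, so the buffer
 * must not straddle that boundary.
 */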
/*
 * Allow user to force VERW variant even if MD_CLEAR is not
 * reported.  For instance, a hypervisor might unknowingly
 * filter the cap out.
 * For similar reasons, and for testing, allow enabling the
 * mitigation even when the MDS_NO cap is set.
 */
void
hw_mds_recalculate(void)
{
	struct pcpu *pc;
	vm_offset_t b64;
	u_long xcr0;
	int i;

	if (cpu_vendor_id != CPU_VENDOR_INTEL || hw_mds_disable == 0 ||
	    ((cpu_ia32_arch_caps & IA32_ARCH_CAP_MDS_NO) != 0 &&
	    hw_mds_disable == 3)) {
		mds_handler = mds_handler_void;
	} else if (((cpu_stdext_feature3 & CPUID_STDEXT3_MD_CLEAR) != 0 &&
	    hw_mds_disable == 3) || hw_mds_disable == 1) {
		mds_handler = mds_handler_verw;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x2e || CPUID_TO_MODEL(cpu_id) == 0x1e ||
	    CPUID_TO_MODEL(cpu_id) == 0x1f || CPUID_TO_MODEL(cpu_id) == 0x1a ||
	    CPUID_TO_MODEL(cpu_id) == 0x2f || CPUID_TO_MODEL(cpu_id) == 0x25 ||
	    CPUID_TO_MODEL(cpu_id) == 0x2c || CPUID_TO_MODEL(cpu_id) == 0x2d ||
	    CPUID_TO_MODEL(cpu_id) == 0x2a || CPUID_TO_MODEL(cpu_id) == 0x3e ||
	    CPUID_TO_MODEL(cpu_id) == 0x3a) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Nehalem, SandyBridge, IvyBridge
		 */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL) {
				pc->pc_mds_buf = malloc_domainset(672, M_TEMP,
				    DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
				bzero(pc->pc_mds_buf, 16);
			}
		}
		mds_handler = mds_handler_ivb;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x3f || CPUID_TO_MODEL(cpu_id) == 0x3c ||
	    CPUID_TO_MODEL(cpu_id) == 0x45 || CPUID_TO_MODEL(cpu_id) == 0x46 ||
	    CPUID_TO_MODEL(cpu_id) == 0x56 || CPUID_TO_MODEL(cpu_id) == 0x4f ||
	    CPUID_TO_MODEL(cpu_id) == 0x47 || CPUID_TO_MODEL(cpu_id) == 0x3d) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Haswell, Broadwell
		 */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL) {
				pc->pc_mds_buf = malloc_domainset(1536, M_TEMP,
				    DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
				bzero(pc->pc_mds_buf, 16);
			}
		}
		mds_handler = mds_handler_bdw;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    ((CPUID_TO_MODEL(cpu_id) == 0x55 && (cpu_id &
	    CPUID_STEPPING) <= 5) ||
	    CPUID_TO_MODEL(cpu_id) == 0x4e || CPUID_TO_MODEL(cpu_id) == 0x5e ||
	    (CPUID_TO_MODEL(cpu_id) == 0x8e && (cpu_id &
	    CPUID_STEPPING) <= 0xb) ||
	    (CPUID_TO_MODEL(cpu_id) == 0x9e && (cpu_id &
	    CPUID_STEPPING) <= 0xc)) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Skylake, KabyLake, CoffeeLake, WhiskeyLake,
		 * CascadeLake
		 */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL) {
				pc->pc_mds_buf = malloc_domainset(6 * 1024,
				    M_TEMP, DOMAINSET_PREF(pc->pc_domain),
				    M_WAITOK);
				b64 = (vm_offset_t)malloc_domainset(64 + 63,
				    M_TEMP, DOMAINSET_PREF(pc->pc_domain),
				    M_WAITOK);
				pc->pc_mds_buf64 = (void *)roundup2(b64, 64);
				bzero(pc->pc_mds_buf64, 64);
			}
		}
		xcr0 = rxcr(0);
		if ((xcr0 & XFEATURE_ENABLED_ZMM_HI256) != 0 &&
		    (cpu_stdext_feature & CPUID_STDEXT_AVX512DQ) != 0)
			mds_handler = mds_handler_skl_avx512;
		else if ((xcr0 & XFEATURE_ENABLED_AVX) != 0 &&
		    (cpu_feature2 & CPUID2_AVX) != 0)
			mds_handler = mds_handler_skl_avx;
		else
			mds_handler = mds_handler_skl_sse;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    ((CPUID_TO_MODEL(cpu_id) == 0x37 ||
	    CPUID_TO_MODEL(cpu_id) == 0x4a ||
	    CPUID_TO_MODEL(cpu_id) == 0x4c ||
	    CPUID_TO_MODEL(cpu_id) == 0x4d ||
	    CPUID_TO_MODEL(cpu_id) == 0x5a ||
	    CPUID_TO_MODEL(cpu_id) == 0x5d ||
	    CPUID_TO_MODEL(cpu_id) == 0x6e ||
	    CPUID_TO_MODEL(cpu_id) == 0x65 ||
	    CPUID_TO_MODEL(cpu_id) == 0x75 ||
	    CPUID_TO_MODEL(cpu_id) == 0x1c ||
	    CPUID_TO_MODEL(cpu_id) == 0x26 ||
	    CPUID_TO_MODEL(cpu_id) == 0x27 ||
	    CPUID_TO_MODEL(cpu_id) == 0x35 ||
	    CPUID_TO_MODEL(cpu_id) == 0x36 ||
	    CPUID_TO_MODEL(cpu_id) == 0x7a))) {
		/* Silvermont, Airmont */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL)
				pc->pc_mds_buf = malloc(256, M_TEMP, M_WAITOK);
		}
		mds_handler = mds_handler_silvermont;
	} else {
		hw_mds_disable = 0;
		mds_handler = mds_handler_void;
	}
}

static void
hw_mds_recalculate_boot(void *arg __unused)
{

	hw_mds_recalculate();
}
SYSINIT(mds_recalc, SI_SUB_SMP, SI_ORDER_ANY, hw_mds_recalculate_boot, NULL);

static int
sysctl_mds_disable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = hw_mds_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val < 0 || val > 3)
		return (EINVAL);
	hw_mds_disable = val;
	hw_mds_recalculate();
	return (0);
}

SYSCTL_PROC(_hw, OID_AUTO, mds_disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_mds_disable_handler, "I",
    "Microarchitectural Data Sampling Mitigation "
    "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");

SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_mds_disable_handler, "I",
    "Microarchitectural Data Sampling Mitigation "
    "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");

/*
 * Intel Transactional Memory Asynchronous Abort Mitigation
 * CVE-2019-11135
 */
int x86_taa_enable;
int x86_taa_state;
enum {
	TAA_NONE	= 0,	/* No mitigation enabled */
	TAA_TSX_DISABLE	= 1,	/* Disable TSX via MSR */
	TAA_VERW	= 2,	/* Use VERW mitigation */
	TAA_AUTO	= 3,	/* Automatically select the mitigation */

	/* The states below are not selectable by the operator */

	TAA_TAA_UC	= 4,	/* Mitigation present in microcode */
	TAA_NOT_PRESENT	= 5	/* TSX is not present */
};

static void
taa_set(bool enable, bool all)
{

	x86_msr_op(MSR_IA32_TSX_CTRL,
	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
	    (all ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
	    IA32_TSX_CTRL_RTM_DISABLE | IA32_TSX_CTRL_TSX_CPUID_CLEAR,
	    NULL);
}
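/*
 * Mitigation selection sketch: x86_taa_recalculate() below reports
 * TAA_NOT_PRESENT when TSX is absent, prefers the TAA_NO microcode
 * mitigation when the CPU advertises it, falls back to disabling TSX
 * via MSR_IA32_TSX_CTRL when available, and otherwise reuses the MDS
 * VERW machinery to flush the CPU buffers.
 */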
void
x86_taa_recalculate(void)
{
	static int taa_saved_mds_disable = 0;
	int taa_need = 0, taa_state = 0;
	int mds_disable = 0, need_mds_recalc = 0;

	/* Check CPUID.07h.EBX.HLE and RTM for the presence of TSX */
	if ((cpu_stdext_feature & CPUID_STDEXT_HLE) == 0 ||
	    (cpu_stdext_feature & CPUID_STDEXT_RTM) == 0) {
		/* TSX is not present */
		x86_taa_state = TAA_NOT_PRESENT;
		return;
	}

	/* Check to see what mitigation options the CPU gives us */
	if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TAA_NO) {
		/* CPU is not susceptible to TAA */
		taa_need = TAA_TAA_UC;
	} else if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TSX_CTRL) {
		/*
		 * CPU can turn off TSX.  This is the next best option
		 * if TAA_NO hardware mitigation isn't present.
		 */
		taa_need = TAA_TSX_DISABLE;
	} else {
		/* No TSX/TAA-specific remedies are available. */
		if (x86_taa_enable == TAA_TSX_DISABLE) {
			if (bootverbose)
				printf("TSX control not available\n");
			return;
		} else
			taa_need = TAA_VERW;
	}

	/* Can we automatically take action, or are we being forced? */
	if (x86_taa_enable == TAA_AUTO)
		taa_state = taa_need;
	else
		taa_state = x86_taa_enable;

	/* No state change, nothing to do */
	if (taa_state == x86_taa_state) {
		if (bootverbose)
			printf("No TSX change made\n");
		return;
	}

	/* Does the MSR need to be turned on or off? */
	if (taa_state == TAA_TSX_DISABLE)
		taa_set(true, true);
	else if (x86_taa_state == TAA_TSX_DISABLE)
		taa_set(false, true);

	/* Does MDS need to be set to turn on VERW? */
	if (taa_state == TAA_VERW) {
		taa_saved_mds_disable = hw_mds_disable;
		mds_disable = hw_mds_disable = 1;
		need_mds_recalc = 1;
	} else if (x86_taa_state == TAA_VERW) {
		mds_disable = hw_mds_disable = taa_saved_mds_disable;
		need_mds_recalc = 1;
	}
	if (need_mds_recalc) {
		hw_mds_recalculate();
		if (mds_disable != hw_mds_disable) {
			if (bootverbose)
				printf("Cannot change MDS state for TAA\n");
			/* Don't update our state */
			return;
		}
	}

	x86_taa_state = taa_state;
	return;
}

static void
taa_recalculate_boot(void *arg __unused)
{

	x86_taa_recalculate();
}
SYSINIT(taa_recalc, SI_SUB_SMP, SI_ORDER_ANY, taa_recalculate_boot, NULL);

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, taa,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TSX Asynchronous Abort Mitigation");

static int
sysctl_taa_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = x86_taa_enable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val < TAA_NONE || val > TAA_AUTO)
		return (EINVAL);
	x86_taa_enable = val;
	x86_taa_recalculate();
	return (0);
}

SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, enable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_taa_handler, "I",
    "TAA Mitigation enablement control "
    "(0 - off, 1 - disable TSX, 2 - VERW, 3 - on AUTO)");

static int
sysctl_taa_state_handler(SYSCTL_HANDLER_ARGS)
{
	const char *state;

	switch (x86_taa_state) {
	case TAA_NONE:
		state = "inactive";
		break;
	case TAA_TSX_DISABLE:
		state = "TSX disabled";
		break;
	case TAA_VERW:
		state = "VERW";
		break;
	case TAA_TAA_UC:
		state = "Mitigated in microcode";
		break;
	case TAA_NOT_PRESENT:
		state = "TSX not present";
		break;
	default:
		state = "unknown";
	}

	return (SYSCTL_OUT(req, state, strlen(state)));
}

SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_taa_state_handler, "A",
    "TAA Mitigation state");

int __read_frequently cpu_flush_rsb_ctxsw;
SYSCTL_INT(_machdep_mitigations, OID_AUTO, flush_rsb_ctxsw,
    CTLFLAG_RW | CTLFLAG_NOFETCH, &cpu_flush_rsb_ctxsw, 0,
    "Flush Return Stack Buffer on context switch");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, rngds,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "MCU Optimization, disable RDSEED mitigation");

int x86_rngds_mitg_enable = 1;
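/*
 * RNGDS (RDSEED/RDRAND data sampling) mitigation control: the
 * recalculation below programs the IA32_RNGDS_MITG_DIS bit of
 * MSR_IA32_MCU_OPT_CTRL on CPUs that advertise the MCUOPT capability;
 * the machdep.mitigations.rngds sysctls expose the operator knobs.
 */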
void
x86_rngds_mitg_recalculate(bool all_cpus)
{
	if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0)
		return;
	x86_msr_op(MSR_IA32_MCU_OPT_CTRL,
	    (x86_rngds_mitg_enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
	    (all_cpus ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
	    IA32_RNGDS_MITG_DIS, NULL);
}

static int
sysctl_rngds_mitg_enable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = x86_rngds_mitg_enable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	x86_rngds_mitg_enable = val;
	x86_rngds_mitg_recalculate(true);
	return (0);
}
SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, enable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_rngds_mitg_enable_handler, "I",
    "MCU Optimization, disabling RDSEED mitigation control "
    "(0 - mitigation disabled (RDSEED optimized), 1 - mitigation enabled)");

static int
sysctl_rngds_state_handler(SYSCTL_HANDLER_ARGS)
{
	const char *state;

	if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0) {
		state = "Not applicable";
	} else if (x86_rngds_mitg_enable == 0) {
		state = "RDSEED not serialized";
	} else {
		state = "Mitigated";
	}
	return (SYSCTL_OUT(req, state, strlen(state)));
}
SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_rngds_state_handler, "A",
    "MCU Optimization state");

/*
 * Enable and restore kernel text write permissions.
 * Callers must ensure that disable_wp()/restore_wp() are executed
 * without rescheduling on the same core.
 */
bool
disable_wp(void)
{
	u_int cr0;

	cr0 = rcr0();
	if ((cr0 & CR0_WP) == 0)
		return (false);
	load_cr0(cr0 & ~CR0_WP);
	return (true);
}

void
restore_wp(bool old_wp)
{

	if (old_wp)
		load_cr0(rcr0() | CR0_WP);
}

bool
acpi_get_fadt_bootflags(uint16_t *flagsp)
{
#ifdef DEV_ACPI
	ACPI_TABLE_FADT *fadt;
	vm_paddr_t physaddr;

	physaddr = acpi_find_table(ACPI_SIG_FADT);
	if (physaddr == 0)
		return (false);
	fadt = acpi_map_table(physaddr, ACPI_SIG_FADT);
	if (fadt == NULL)
		return (false);
	*flagsp = fadt->BootFlags;
	acpi_unmap_table(fadt);
	return (true);
#else
	return (false);
#endif
}

DEFINE_IFUNC(, uint64_t, rdtsc_ordered, (void))
{
	bool cpu_is_amd = cpu_vendor_id == CPU_VENDOR_AMD ||
	    cpu_vendor_id == CPU_VENDOR_HYGON;

	if ((amd_feature & AMDID_RDTSCP) != 0)
		return (rdtscp);
	else if ((cpu_feature & CPUID_SSE2) != 0)
		return (cpu_is_amd ? rdtsc_ordered_mfence :
		    rdtsc_ordered_lfence);
	else
		return (rdtsc);
}