/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_atpic.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_isa.h"
#include "opt_kdb.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_platform.h"
#ifdef __i386__
#include "opt_apic.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/domainset.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/tss.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef CPU_ELAN
#include <machine/elan_mmcr.h>
#endif
#include <x86/acpica_machdep.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#include <isa/isareg.h>

#include <contrib/dev/acpica/include/acpi.h>

#define	STATE_RUNNING	0x0
#define	STATE_MWAIT	0x1
#define	STATE_SLEEPING	0x2

#ifdef SMP
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
#endif

char bootmethod[16];
SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
    "System firmware boot method");

struct msr_op_arg {
	u_int msr;
	int op;
	uint64_t arg1;
	uint64_t *res;
};

static void
x86_msr_op_one(void *argp)
{
	struct msr_op_arg *a;
	uint64_t v;

	a = argp;
	switch (a->op) {
	case MSR_OP_ANDNOT:
		v = rdmsr(a->msr);
		v &= ~a->arg1;
		wrmsr(a->msr, v);
		break;
	case MSR_OP_OR:
		v = rdmsr(a->msr);
		v |= a->arg1;
		wrmsr(a->msr, v);
		break;
	case MSR_OP_WRITE:
		wrmsr(a->msr, a->arg1);
		break;
	case MSR_OP_READ:
		v = rdmsr(a->msr);
		*a->res = v;
		break;
	}
}

/*
 * The op word passed to x86_msr_op() packs the execution mode (top
 * nibble), the target CPU id (bits 8..27, extracted by
 * MSR_OP_GET_CPUID()) and the MSR operation itself (low byte).
 */
#define	MSR_OP_EXMODE_MASK	0xf0000000
#define	MSR_OP_OP_MASK		0x000000ff
#define	MSR_OP_GET_CPUID(x)	(((x) & ~MSR_OP_EXMODE_MASK) >> 8)

void
x86_msr_op(u_int msr, u_int op, uint64_t arg1, uint64_t *res)
{
	struct thread *td;
	struct msr_op_arg a;
	cpuset_t set;
	u_int exmode;
	int bound_cpu, cpu, i, is_bound;

	a.op = op & MSR_OP_OP_MASK;
	MPASS(a.op == MSR_OP_ANDNOT || a.op == MSR_OP_OR ||
	    a.op == MSR_OP_WRITE || a.op == MSR_OP_READ);
	exmode = op & MSR_OP_EXMODE_MASK;
	MPASS(exmode == MSR_OP_LOCAL || exmode == MSR_OP_SCHED_ALL ||
	    exmode == MSR_OP_SCHED_ONE || exmode == MSR_OP_RENDEZVOUS_ALL ||
	    exmode == MSR_OP_RENDEZVOUS_ONE);
	a.msr = msr;
	a.arg1 = arg1;
	a.res = res;
	switch (exmode) {
	case MSR_OP_LOCAL:
		x86_msr_op_one(&a);
		break;
	case MSR_OP_SCHED_ALL:
		td = curthread;
		thread_lock(td);
		is_bound = sched_is_bound(td);
		bound_cpu = td->td_oncpu;
		CPU_FOREACH(i) {
			sched_bind(td, i);
			x86_msr_op_one(&a);
		}
		if (is_bound)
			sched_bind(td, bound_cpu);
		else
			sched_unbind(td);
		thread_unlock(td);
		break;
	case MSR_OP_SCHED_ONE:
		td = curthread;
		cpu = MSR_OP_GET_CPUID(op);
		thread_lock(td);
		is_bound = sched_is_bound(td);
		bound_cpu = td->td_oncpu;
		if (!is_bound || bound_cpu != cpu)
			sched_bind(td, cpu);
		x86_msr_op_one(&a);
		if (is_bound) {
			if (bound_cpu != cpu)
				sched_bind(td, bound_cpu);
		} else {
			sched_unbind(td);
		}
		thread_unlock(td);
		break;
	case MSR_OP_RENDEZVOUS_ALL:
		smp_rendezvous(smp_no_rendezvous_barrier, x86_msr_op_one,
		    smp_no_rendezvous_barrier, &a);
		break;
	case MSR_OP_RENDEZVOUS_ONE:
		cpu = MSR_OP_GET_CPUID(op);
		CPU_SETOF(cpu, &set);
		smp_rendezvous_cpus(set, smp_no_rendezvous_barrier,
		    x86_msr_op_one, smp_no_rendezvous_barrier, &a);
		break;
	}
}

/*
 * Automatically initialized per CPU errata in cpu_idle_tun below.
 */
bool mwait_cpustop_broken = false;
SYSCTL_BOOL(_machdep, OID_AUTO, mwait_cpustop_broken, CTLFLAG_RDTUN,
    &mwait_cpustop_broken, 0,
    "Can not reliably wake MONITOR/MWAIT cpus without interrupts");

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable */
}

void
acpi_cpu_c1(void)
{

	__asm __volatile("sti; hlt");
}

/*
 * Use mwait to pause execution while waiting for an interrupt or
 * another thread to signal that there is more work.
 *
 * NOTE: Interrupts will cause a wakeup; however, this function does
 * not enable interrupt handling. The caller is responsible to enable
 * interrupts.
 */
void
acpi_cpu_idle_mwait(uint32_t mwait_hint)
{
	int *state;
	uint64_t v;

	/*
	 * A comment in Linux patch claims that 'CPUs run faster with
	 * speculation protection disabled. All CPU threads in a core
	 * must disable speculation protection for it to be
	 * disabled. Disable it while we are idle so the other
	 * hyperthread can run fast.'
	 *
	 * XXXKIB.  Software coordination mode should be supported,
	 * but all Intel CPUs provide hardware coordination.
	 */

	state = &PCPU_PTR(monitorbuf)->idle_state;
	KASSERT(atomic_load_int(state) == STATE_SLEEPING,
	    ("cpu_mwait_cx: wrong monitorbuf state"));
	atomic_store_int(state, STATE_MWAIT);
	if (PCPU_GET(ibpb_set) || hw_ssb_active) {
		v = rdmsr(MSR_IA32_SPEC_CTRL);
		wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
		    IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
	} else {
		v = 0;
	}
	cpu_monitor(state, 0, 0);
	if (atomic_load_int(state) == STATE_MWAIT)
		cpu_mwait(MWAIT_INTRBREAK, mwait_hint);

	/*
	 * SSB cannot be disabled while we sleep, or rather, if it was
	 * disabled, the sysctl thread will bind to our cpu to tweak
	 * MSR.
	 */
	if (v != 0)
		wrmsr(MSR_IA32_SPEC_CTRL, v);

	/*
	 * We should exit on any event that interrupts mwait, because
	 * that event might be a wanted interrupt.
	 */
	atomic_store_int(state, STATE_RUNNING);
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	uint64_t tsc1, tsc2;
	uint64_t acnt, mcnt, perf;
	register_t reg;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
#ifdef __i386__
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);
#endif

	/*
	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
	 * DELAY(9) based logic fails.
	 */
	if (tsc_is_invariant && !tsc_perf_stat)
		return (EOPNOTSUPP);

#ifdef SMP
	if (smp_cpus > 1) {
		/* Schedule ourselves on the indicated cpu. */
		thread_lock(curthread);
		sched_bind(curthread, cpu_id);
		thread_unlock(curthread);
	}
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	if (tsc_is_invariant) {
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		tsc1 = rdtsc();
		DELAY(1000);
		mcnt = rdmsr(MSR_MPERF);
		acnt = rdmsr(MSR_APERF);
		tsc2 = rdtsc();
		intr_restore(reg);
		perf = 1000 * acnt / mcnt;
		*rate = (tsc2 - tsc1) * perf;
	} else {
		tsc1 = rdtsc();
		DELAY(1000);
		tsc2 = rdtsc();
		intr_restore(reg);
		*rate = (tsc2 - tsc1) * 1000;
	}

#ifdef SMP
	if (smp_cpus > 1) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}
#endif

	return (0);
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		halt();
}

static void
cpu_reset_real(void)
{
	struct region_descriptor null_idt;
	int b;

	disable_intr();
#ifdef CPU_ELAN
	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;
#endif
#ifdef __i386__
	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);
		outl(0xcfc, 0xf);
	}
#endif
#if !defined(BROKEN_KEYBOARD_RESET)
	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
#endif

	/*
	 * Attempt to force a reset via the Reset Control register at
	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
	 * transitions from 0 to 1.  Bit 1 selects the type of reset
	 * to attempt: 0 selects a "soft" reset, and 1 selects a
	 * "hard" reset.  We try a "hard" reset.  The first write sets
	 * bit 1 to select a "hard" reset and clears bit 2.  The
	 * second write forces a 0 -> 1 transition in bit 2 to trigger
	 * a reset.
	 */
	outb(0xcf9, 0x2);
	outb(0xcf9, 0x6);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */

	/*
	 * Attempt to force a reset via the Fast A20 and Init register
	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
	 * preserve bit 1 while setting bit 0.  We also must clear bit
	 * 0 before setting it if it isn't already clear.
	 */
	b = inb(0x92);
	if (b != 0xff) {
		if ((b & 0x1) != 0)
			outb(0x92, b & 0xfe);
		outb(0x92, b | 0x1);
		DELAY(500000);	/* wait 0.5 sec to see if that did it */
	}

	printf("No known reset method worked, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */

	/* Wipe the IDT. */
	null_idt.rd_limit = 0;
	null_idt.rd_base = 0;
	lidt(&null_idt);

	/* "good night, sweet prince .... <THUNK!>" */
	breakpoint();

	/* NOTREACHED */
	while(1);
}

#ifdef SMP
static void
cpu_reset_proxy(void)
{

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		ia32_pause(); /* Wait for other cpu to see that we've started */

	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif

void
cpu_reset(void)
{
#ifdef SMP
	struct monitorbuf *mb;
	cpuset_t map;
	u_int cnt;

	if (smp_started) {
		map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &map);
		CPU_ANDNOT(&map, &stopped_cpus);
		if (!CPU_EMPTY(&map)) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);
		}

		if (PCPU_GET(cpuid) != 0) {
			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");

			/* Restart CPU #0. */
			CPU_SETOF(0, &started_cpus);
			mb = &pcpu_find(0)->pc_monitorbuf;
			atomic_store_int(&mb->stop_state,
			    MONITOR_STOPSTATE_RUNNING);

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
				ia32_pause();
				cnt++;	/* Wait for BSP to announce restart */
			}
			if (cpu_reset_proxy_active == 0) {
				printf("cpu_reset: Failed to restart BSP\n");
			} else {
				cpu_reset_proxy_active = 2;
				while (1)
					ia32_pause();
				/* NOTREACHED */
			}
		}

		DELAY(1000000);
	}
#endif
	cpu_reset_real();
	/* NOTREACHED */
}

bool
cpu_mwait_usable(void)
{

	return ((cpu_feature2 & CPUID2_MON) != 0 && ((cpu_mon_mwait_flags &
	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)) ==
	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)));
}

void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */

int cpu_amdc1e_bug = 0;			/* AMD C1E APIC workaround required. */

static int	idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");

static void
cpu_idle_acpi(sbintime_t sbt)
{
	int *state;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	atomic_store_int(state, STATE_SLEEPING);

	/* See comments in cpu_idle_hlt(). */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else if (cpu_idle_hook)
		cpu_idle_hook(sbt);
	else
		acpi_cpu_c1();
	atomic_store_int(state, STATE_RUNNING);
}

static void
cpu_idle_hlt(sbintime_t sbt)
{
	int *state;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	atomic_store_int(state, STATE_SLEEPING);

	/*
	 * Since we may be in a critical section from cpu_idle(), if
	 * an interrupt fires during that critical section we may have
	 * a pending preemption.  If the CPU halts, then that thread
	 * may not execute until a later interrupt awakens the CPU.
	 * To handle this race, check for a runnable thread after
	 * disabling interrupts and immediately return if one is
	 * found.  Also, we must absolutely guarantee that hlt is
	 * the next instruction after sti.  This ensures that any
	 * interrupt that fires after the call to disable_intr() will
	 * immediately awaken the CPU from hlt.
	 * Finally, note that on x86 this works fine because interrupts
	 * are enabled only after the instruction following sti takes
	 * place, while IF is set to 1 immediately, allowing the hlt
	 * instruction to acknowledge the interrupt.
	 */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		acpi_cpu_c1();
	atomic_store_int(state, STATE_RUNNING);
}

static void
cpu_idle_mwait(sbintime_t sbt)
{
	int *state;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	atomic_store_int(state, STATE_MWAIT);

	/* See comments in cpu_idle_hlt(). */
	disable_intr();
	if (sched_runnable()) {
		atomic_store_int(state, STATE_RUNNING);
		enable_intr();
		return;
	}

	cpu_monitor(state, 0, 0);
	if (atomic_load_int(state) == STATE_MWAIT)
		__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
	else
		enable_intr();
	atomic_store_int(state, STATE_RUNNING);
}

static void
cpu_idle_spin(sbintime_t sbt)
{
	int *state;
	int i;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	atomic_store_int(state, STATE_RUNNING);

	/*
	 * The sched_runnable() call is racy, but since this is a loop,
	 * missing it once has little impact if any (and it is much
	 * better than not checking at all).
	 */
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		cpu_spinwait();
	}
}

void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;

void
cpu_idle(int busy)
{
	uint64_t msr;
	sbintime_t sbt = -1;

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	    busy, curcpu);
#ifdef MP_WATCHDOG
	ap_watchdog(PCPU_GET(cpuid));
#endif

	/* If we are busy - try to use fast methods. */
	if (busy) {
		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
			cpu_idle_mwait(busy);
			goto out;
		}
	}

	/* If we have time - switch timers into idle mode. */
	if (!busy) {
		critical_enter();
		sbt = cpu_idleclock();
	}

	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_amdc1e_bug && cpu_disable_c3_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if ((msr & (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)) != 0)
			wrmsr(MSR_AMDK8_IPM, msr & ~(AMDK8_SMIONCMPHALT |
			    AMDK8_C1EONCMPHALT));
	}

	/* Call main idle method. */
	cpu_idle_fn(sbt);

	/* Switch timers back into active mode. */
	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
out:
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
	    busy, curcpu);
}

static int cpu_idle_apl31_workaround;
SYSCTL_INT(_machdep, OID_AUTO, idle_apl31, CTLFLAG_RW,
    &cpu_idle_apl31_workaround, 0,
    "Apollo Lake APL31 MWAIT bug workaround");

int
cpu_idle_wakeup(int cpu)
{
	struct monitorbuf *mb;
	int *state;

	mb = &pcpu_find(cpu)->pc_monitorbuf;
	state = &mb->idle_state;
	switch (atomic_load_int(state)) {
	case STATE_SLEEPING:
		return (0);
	case STATE_MWAIT:
		atomic_store_int(state, STATE_RUNNING);
		return (cpu_idle_apl31_workaround ? 0 : 1);
	case STATE_RUNNING:
		return (1);
	default:
		panic("bad monitor state");
		return (1);
	}
}

/*
 * Ordered by speed/power consumption.
 */
static struct {
	void	*id_fn;
	char	*id_name;
	int	id_cpuid2_flag;
} idle_tbl[] = {
	{ .id_fn = cpu_idle_spin, .id_name = "spin" },
	{ .id_fn = cpu_idle_mwait, .id_name = "mwait",
	    .id_cpuid2_flag = CPUID2_MON },
	{ .id_fn = cpu_idle_hlt, .id_name = "hlt" },
	{ .id_fn = cpu_idle_acpi, .id_name = "acpi" },
};

static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_cpuid2_flag != 0 &&
		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle_available,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
    0, 0, idle_sysctl_available, "A",
    "list of available idle functions");

static bool
cpu_idle_selector(const char *new_idle_name)
{
	int i;

	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_cpuid2_flag != 0 &&
		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		if (strcmp(idle_tbl[i].id_name, new_idle_name))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		if (bootverbose)
			printf("CPU idle set to %s\n", idle_tbl[i].id_name);
		return (true);
	}
	return (false);
}

static int
cpu_idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16], *p;
	int error, i;

	p = "unknown";
	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (cpu_idle_selector(buf) ? 0 : EINVAL);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, cpu_idle_sysctl, "A",
    "currently selected idle function");

static void
cpu_idle_tun(void *unused __unused)
{
	char tunvar[16];

	if (TUNABLE_STR_FETCH("machdep.idle", tunvar, sizeof(tunvar)))
		cpu_idle_selector(tunvar);
	else if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1) {
		/* Ryzen errata 1057, 1109. */
		cpu_idle_selector("hlt");
		idle_mwait = 0;
		mwait_cpustop_broken = true;
	}

	if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_id == 0x506c9) {
		/*
		 * Apollo Lake errata APL31 (public errata APL30).
		 * Stores to the armed address range may not trigger
		 * MWAIT to resume execution.  OS needs to use
		 * interrupts to wake processors from MWAIT-induced
		 * sleep states.
		 */
		cpu_idle_apl31_workaround = 1;
		mwait_cpustop_broken = true;
	}
	TUNABLE_INT_FETCH("machdep.idle_apl31", &cpu_idle_apl31_workaround);
}
SYSINIT(cpu_idle_tun, SI_SUB_CPU, SI_ORDER_MIDDLE, cpu_idle_tun, NULL);

static int panic_on_nmi = 0xff;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,
    &panic_on_nmi, 0,
    "Panic on NMI: 1 = H/W failure; 2 = unknown; 0xff = all");
int nmi_is_broadcast = 1;
SYSCTL_INT(_machdep, OID_AUTO, nmi_is_broadcast, CTLFLAG_RWTUN,
    &nmi_is_broadcast, 0,
    "Chipset NMI is broadcast");
int (*apei_nmi)(void);

void
nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame)
{
	bool claimed = false;

#ifdef DEV_ISA
	/* machine/parity/power fail/"kitchen sink" faults */
	if (isa_nmi(frame->tf_err)) {
		claimed = true;
		if ((panic_on_nmi & 1) != 0)
			panic("NMI indicates hardware failure");
	}
#endif /* DEV_ISA */

	/* ACPI Platform Error Interfaces callback. */
	if (apei_nmi != NULL && (*apei_nmi)())
		claimed = true;

	/*
	 * NMIs can be useful for debugging.  They can be hooked up to a
	 * pushbutton, usually on an ISA, PCI, or PCIe card.  They can also be
	 * generated by an IPMI BMC, either manually or in response to a
	 * watchdog timeout.  For example, see the "power diag" command in
	 * ports/sysutils/ipmitool.  They can also be generated by a
	 * hypervisor; see "bhyvectl --inject-nmi".
	 */

#ifdef KDB
	if (!claimed && (panic_on_nmi & 2) != 0) {
		if (debugger_on_panic) {
			printf("NMI/cpu%d ... going to debugger\n", cpu);
			claimed = kdb_trap(type, 0, frame);
		}
	}
#endif /* KDB */

	if (!claimed && panic_on_nmi != 0)
		panic("NMI");
}

void
nmi_handle_intr(u_int type, struct trapframe *frame)
{

#ifdef SMP
	if (nmi_is_broadcast) {
		nmi_call_kdb_smp(type, frame);
		return;
	}
#endif
	nmi_call_kdb(PCPU_GET(cpuid), type, frame);
}

static int hw_ibrs_active;
int hw_ibrs_ibpb_active;
int hw_ibrs_disable = 1;

SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
    "Indirect Branch Restricted Speculation active");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ibrs,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Indirect Branch Restricted Speculation active");

SYSCTL_INT(_machdep_mitigations_ibrs, OID_AUTO, active, CTLFLAG_RD,
    &hw_ibrs_active, 0, "Indirect Branch Restricted Speculation active");

void
hw_ibrs_recalculate(bool for_all_cpus)
{
	if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
		x86_msr_op(MSR_IA32_SPEC_CTRL, (for_all_cpus ?
		    MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL) |
		    (hw_ibrs_disable != 0 ?
		    MSR_OP_ANDNOT : MSR_OP_OR),
		    IA32_SPEC_CTRL_IBRS, NULL);
		hw_ibrs_active = hw_ibrs_disable == 0;
		hw_ibrs_ibpb_active = 0;
	} else {
		hw_ibrs_active = hw_ibrs_ibpb_active = (cpu_stdext_feature3 &
		    CPUID_STDEXT3_IBPB) != 0 && !hw_ibrs_disable;
	}
}

static int
hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = hw_ibrs_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	hw_ibrs_disable = val != 0;
	hw_ibrs_recalculate(true);
	return (0);
}
SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
    "Disable Indirect Branch Restricted Speculation");

SYSCTL_PROC(_machdep_mitigations_ibrs, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ibrs_disable_handler, "I",
    "Disable Indirect Branch Restricted Speculation");

int hw_ssb_active;
int hw_ssb_disable;

SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
    &hw_ssb_active, 0,
    "Speculative Store Bypass Disable active");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ssb,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Speculative Store Bypass Disable active");

SYSCTL_INT(_machdep_mitigations_ssb, OID_AUTO, active, CTLFLAG_RD,
    &hw_ssb_active, 0, "Speculative Store Bypass Disable active");

static void
hw_ssb_set(bool enable, bool for_all_cpus)
{

	if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
		hw_ssb_active = 0;
		return;
	}
	hw_ssb_active = enable;
	x86_msr_op(MSR_IA32_SPEC_CTRL,
	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
	    (for_all_cpus ? MSR_OP_SCHED_ALL : MSR_OP_LOCAL),
	    IA32_SPEC_CTRL_SSBD, NULL);
}

void
hw_ssb_recalculate(bool all_cpus)
{

	switch (hw_ssb_disable) {
	default:
		hw_ssb_disable = 0;
		/* FALLTHROUGH */
	case 0: /* off */
		hw_ssb_set(false, all_cpus);
		break;
	case 1: /* on */
		hw_ssb_set(true, all_cpus);
		break;
	case 2: /* auto */
		hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSB_NO) != 0 ?
		    false : true, all_cpus);
		break;
	}
}

static int
hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = hw_ssb_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	hw_ssb_disable = val;
	hw_ssb_recalculate(true);
	return (0);
}
SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ssb_disable_handler, "I",
    "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");

SYSCTL_PROC(_machdep_mitigations_ssb, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ssb_disable_handler, "I",
    "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");

int hw_mds_disable;

/*
 * Handler for Microarchitectural Data Sampling issues.  Really not a
 * pointer to C function: on amd64 the code must not change any CPU
 * architectural state except possibly %rflags.  Also, it is always
 * called with interrupts disabled.
 */
void mds_handler_void(void);
void mds_handler_verw(void);
void mds_handler_ivb(void);
void mds_handler_bdw(void);
void mds_handler_skl_sse(void);
void mds_handler_skl_avx(void);
void mds_handler_skl_avx512(void);
void mds_handler_silvermont(void);
void (*mds_handler)(void) = mds_handler_void;

static int
sysctl_hw_mds_disable_state_handler(SYSCTL_HANDLER_ARGS)
{
	const char *state;

	if (mds_handler == mds_handler_void)
		state = "inactive";
	else if (mds_handler == mds_handler_verw)
		state = "VERW";
	else if (mds_handler == mds_handler_ivb)
		state = "software IvyBridge";
	else if (mds_handler == mds_handler_bdw)
		state = "software Broadwell";
	else if (mds_handler == mds_handler_skl_sse)
		state = "software Skylake SSE";
	else if (mds_handler == mds_handler_skl_avx)
		state = "software Skylake AVX";
	else if (mds_handler == mds_handler_skl_avx512)
		state = "software Skylake AVX512";
	else if (mds_handler == mds_handler_silvermont)
		state = "software Silvermont";
	else
		state = "unknown";
	return (SYSCTL_OUT(req, state, strlen(state)));
}

SYSCTL_PROC(_hw, OID_AUTO, mds_disable_state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_hw_mds_disable_state_handler, "A",
    "Microarchitectural Data Sampling Mitigation state");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, mds,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Microarchitectural Data Sampling Mitigation state");

SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_hw_mds_disable_state_handler, "A",
    "Microarchitectural Data Sampling Mitigation state");

_Static_assert(__offsetof(struct pcpu, pc_mds_tmp) % 64 == 0, "MDS AVX512");

void
hw_mds_recalculate(void)
{
	struct pcpu *pc;
	vm_offset_t b64;
	u_long xcr0;
	int i;

	/*
	 * Allow user to force VERW variant even if MD_CLEAR is not
	 * reported.  For instance, hypervisor might unknowingly
	 * filter the cap out.
	 * For similar reasons, and for testing, allow enabling the
	 * mitigation even when the MDS_NO cap is set.
	 */
	if (cpu_vendor_id != CPU_VENDOR_INTEL || hw_mds_disable == 0 ||
	    ((cpu_ia32_arch_caps & IA32_ARCH_CAP_MDS_NO) != 0 &&
	    hw_mds_disable == 3)) {
		mds_handler = mds_handler_void;
	} else if (((cpu_stdext_feature3 & CPUID_STDEXT3_MD_CLEAR) != 0 &&
	    hw_mds_disable == 3) || hw_mds_disable == 1) {
		mds_handler = mds_handler_verw;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x2e || CPUID_TO_MODEL(cpu_id) == 0x1e ||
	    CPUID_TO_MODEL(cpu_id) == 0x1f || CPUID_TO_MODEL(cpu_id) == 0x1a ||
	    CPUID_TO_MODEL(cpu_id) == 0x2f || CPUID_TO_MODEL(cpu_id) == 0x25 ||
	    CPUID_TO_MODEL(cpu_id) == 0x2c || CPUID_TO_MODEL(cpu_id) == 0x2d ||
	    CPUID_TO_MODEL(cpu_id) == 0x2a || CPUID_TO_MODEL(cpu_id) == 0x3e ||
	    CPUID_TO_MODEL(cpu_id) == 0x3a) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Nehalem, SandyBridge, IvyBridge
		 */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL) {
				pc->pc_mds_buf = malloc_domainset(672, M_TEMP,
				    DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
				bzero(pc->pc_mds_buf, 16);
			}
		}
		mds_handler = mds_handler_ivb;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x3f || CPUID_TO_MODEL(cpu_id) == 0x3c ||
	    CPUID_TO_MODEL(cpu_id) == 0x45 || CPUID_TO_MODEL(cpu_id) == 0x46 ||
	    CPUID_TO_MODEL(cpu_id) == 0x56 || CPUID_TO_MODEL(cpu_id) == 0x4f ||
	    CPUID_TO_MODEL(cpu_id) == 0x47 || CPUID_TO_MODEL(cpu_id) == 0x3d) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Haswell, Broadwell
		 */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL) {
				pc->pc_mds_buf = malloc_domainset(1536, M_TEMP,
				    DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
				bzero(pc->pc_mds_buf, 16);
			}
		}
		mds_handler = mds_handler_bdw;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    ((CPUID_TO_MODEL(cpu_id) == 0x55 && (cpu_id &
	    CPUID_STEPPING) <= 5) ||
	    CPUID_TO_MODEL(cpu_id) == 0x4e || CPUID_TO_MODEL(cpu_id) == 0x5e ||
	    (CPUID_TO_MODEL(cpu_id) == 0x8e && (cpu_id &
	    CPUID_STEPPING) <= 0xb) ||
	    (CPUID_TO_MODEL(cpu_id) == 0x9e && (cpu_id &
	    CPUID_STEPPING) <= 0xc)) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Skylake, KabyLake, CoffeeLake, WhiskeyLake,
		 * CascadeLake
		 */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL) {
				pc->pc_mds_buf = malloc_domainset(6 * 1024,
				    M_TEMP, DOMAINSET_PREF(pc->pc_domain),
				    M_WAITOK);
				b64 = (vm_offset_t)malloc_domainset(64 + 63,
				    M_TEMP, DOMAINSET_PREF(pc->pc_domain),
				    M_WAITOK);
				pc->pc_mds_buf64 = (void *)roundup2(b64, 64);
				bzero(pc->pc_mds_buf64, 64);
			}
		}
		xcr0 = rxcr(0);
		if ((xcr0 & XFEATURE_ENABLED_ZMM_HI256) != 0 &&
		    (cpu_stdext_feature & CPUID_STDEXT_AVX512DQ) != 0)
			mds_handler = mds_handler_skl_avx512;
		else if ((xcr0 & XFEATURE_ENABLED_AVX) != 0 &&
		    (cpu_feature2 & CPUID2_AVX) != 0)
			mds_handler = mds_handler_skl_avx;
		else
			mds_handler = mds_handler_skl_sse;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    ((CPUID_TO_MODEL(cpu_id) == 0x37 ||
	    CPUID_TO_MODEL(cpu_id) == 0x4a ||
	    CPUID_TO_MODEL(cpu_id) == 0x4c ||
	    CPUID_TO_MODEL(cpu_id) == 0x4d ||
	    CPUID_TO_MODEL(cpu_id) == 0x5a ||
	    CPUID_TO_MODEL(cpu_id) == 0x5d ||
	    CPUID_TO_MODEL(cpu_id) == 0x6e ||
	    CPUID_TO_MODEL(cpu_id) == 0x65 ||
	    CPUID_TO_MODEL(cpu_id) == 0x75 ||
	    CPUID_TO_MODEL(cpu_id) == 0x1c ||
	    CPUID_TO_MODEL(cpu_id) == 0x26 ||
	    CPUID_TO_MODEL(cpu_id) == 0x27 ||
	    CPUID_TO_MODEL(cpu_id) == 0x35 ||
	    CPUID_TO_MODEL(cpu_id) == 0x36 ||
	    CPUID_TO_MODEL(cpu_id) == 0x7a))) {
		/* Silvermont, Airmont */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL)
				pc->pc_mds_buf = malloc(256, M_TEMP, M_WAITOK);
		}
		mds_handler = mds_handler_silvermont;
	} else {
		hw_mds_disable = 0;
		mds_handler = mds_handler_void;
	}
}

static void
hw_mds_recalculate_boot(void *arg __unused)
{

	hw_mds_recalculate();
}
SYSINIT(mds_recalc, SI_SUB_SMP, SI_ORDER_ANY, hw_mds_recalculate_boot, NULL);

static int
sysctl_mds_disable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = hw_mds_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val < 0 || val > 3)
		return (EINVAL);
	hw_mds_disable = val;
	hw_mds_recalculate();
	return (0);
}

SYSCTL_PROC(_hw, OID_AUTO, mds_disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_mds_disable_handler, "I",
    "Microarchitectural Data Sampling Mitigation "
    "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");

SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_mds_disable_handler, "I",
    "Microarchitectural Data Sampling Mitigation "
    "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");

/*
 * Intel Transactional Memory Asynchronous Abort Mitigation
 * CVE-2019-11135
 */
int x86_taa_enable;
int x86_taa_state;
enum {
	TAA_NONE	= 0,	/* No mitigation enabled */
	TAA_TSX_DISABLE	= 1,	/* Disable TSX via MSR */
	TAA_VERW	= 2,	/* Use VERW mitigation */
	TAA_AUTO	= 3,	/* Automatically select the mitigation */

	/* The states below are not selectable by the operator */

	TAA_TAA_UC	= 4,	/* Mitigation present in microcode */
	TAA_NOT_PRESENT	= 5	/* TSX is not present */
};

static void
taa_set(bool enable, bool all)
{

	x86_msr_op(MSR_IA32_TSX_CTRL,
	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
	    (all ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
	    IA32_TSX_CTRL_RTM_DISABLE | IA32_TSX_CTRL_TSX_CPUID_CLEAR,
	    NULL);
}

void
x86_taa_recalculate(void)
{
	static int taa_saved_mds_disable = 0;
	int taa_need = 0, taa_state = 0;
	int mds_disable = 0, need_mds_recalc = 0;

	/* Check CPUID.07h.EBX.HLE and RTM for the presence of TSX */
	if ((cpu_stdext_feature & CPUID_STDEXT_HLE) == 0 ||
	    (cpu_stdext_feature & CPUID_STDEXT_RTM) == 0) {
		/* TSX is not present */
		x86_taa_state = TAA_NOT_PRESENT;
		return;
	}

	/* Check to see what mitigation options the CPU gives us */
	if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TAA_NO) {
		/* CPU is not susceptible to TAA */
		taa_need = TAA_TAA_UC;
	} else if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TSX_CTRL) {
		/*
		 * CPU can turn off TSX.  This is the next best option
		 * if TAA_NO hardware mitigation isn't present
		 */
		taa_need = TAA_TSX_DISABLE;
	} else {
		/* No TSX/TAA specific remedies are available. */
		if (x86_taa_enable == TAA_TSX_DISABLE) {
			if (bootverbose)
				printf("TSX control not available\n");
			return;
		} else
			taa_need = TAA_VERW;
	}

	/* Can we automatically take action, or are we being forced? */
	if (x86_taa_enable == TAA_AUTO)
		taa_state = taa_need;
	else
		taa_state = x86_taa_enable;

	/* No state change, nothing to do */
	if (taa_state == x86_taa_state) {
		if (bootverbose)
			printf("No TSX change made\n");
		return;
	}

	/* Does the MSR need to be turned on or off? */
	if (taa_state == TAA_TSX_DISABLE)
		taa_set(true, true);
	else if (x86_taa_state == TAA_TSX_DISABLE)
		taa_set(false, true);

	/* Does MDS need to be set to turn on VERW? */
	if (taa_state == TAA_VERW) {
		taa_saved_mds_disable = hw_mds_disable;
		mds_disable = hw_mds_disable = 1;
		need_mds_recalc = 1;
	} else if (x86_taa_state == TAA_VERW) {
		mds_disable = hw_mds_disable = taa_saved_mds_disable;
		need_mds_recalc = 1;
	}
	if (need_mds_recalc) {
		hw_mds_recalculate();
		if (mds_disable != hw_mds_disable) {
			if (bootverbose)
				printf("Cannot change MDS state for TAA\n");
			/* Don't update our state */
			return;
		}
	}

	x86_taa_state = taa_state;
	return;
}

static void
taa_recalculate_boot(void * arg __unused)
{

	x86_taa_recalculate();
}
SYSINIT(taa_recalc, SI_SUB_SMP, SI_ORDER_ANY, taa_recalculate_boot, NULL);

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, taa,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TSX Asynchronous Abort Mitigation");

static int
sysctl_taa_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = x86_taa_enable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val < TAA_NONE || val > TAA_AUTO)
		return (EINVAL);
	x86_taa_enable = val;
	x86_taa_recalculate();
	return (0);
}

SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, enable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_taa_handler, "I",
    "TAA Mitigation enablement control "
    "(0 - off, 1 - disable TSX, 2 - VERW, 3 - on AUTO)");

static int
sysctl_taa_state_handler(SYSCTL_HANDLER_ARGS)
{
	const char *state;

	switch (x86_taa_state) {
	case TAA_NONE:
		state = "inactive";
		break;
	case TAA_TSX_DISABLE:
		state = "TSX disabled";
		break;
	case TAA_VERW:
		state = "VERW";
		break;
	case TAA_TAA_UC:
		state = "Mitigated in microcode";
		break;
	case TAA_NOT_PRESENT:
		state = "TSX not present";
		break;
	default:
		state = "unknown";
	}

	return (SYSCTL_OUT(req, state, strlen(state)));
}

SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_taa_state_handler, "A",
    "TAA Mitigation state");

int __read_frequently cpu_flush_rsb_ctxsw;
SYSCTL_INT(_machdep_mitigations, OID_AUTO, flush_rsb_ctxsw,
    CTLFLAG_RW | CTLFLAG_NOFETCH, &cpu_flush_rsb_ctxsw, 0,
    "Flush Return Stack Buffer on context switch");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, rngds,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "MCU Optimization, disable RDSEED mitigation");

int x86_rngds_mitg_enable = 1;
void
x86_rngds_mitg_recalculate(bool all_cpus)
{
	if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0)
		return;
	x86_msr_op(MSR_IA32_MCU_OPT_CTRL,
	    (x86_rngds_mitg_enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
	    (all_cpus ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
	    IA32_RNGDS_MITG_DIS, NULL);
}

static int
sysctl_rngds_mitg_enable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = x86_rngds_mitg_enable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	x86_rngds_mitg_enable = val;
	x86_rngds_mitg_recalculate(true);
	return (0);
}
SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, enable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_rngds_mitg_enable_handler, "I",
    "MCU Optimization, disabling RDSEED mitigation control "
    "(0 - mitigation disabled (RDSEED optimized), 1 - mitigation enabled)");

static int
sysctl_rngds_state_handler(SYSCTL_HANDLER_ARGS)
{
	const char *state;

	if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0) {
		state = "Not applicable";
	} else if (x86_rngds_mitg_enable == 0) {
		state = "RDSEED not serialized";
	} else {
		state = "Mitigated";
	}
	return (SYSCTL_OUT(req, state, strlen(state)));
}
SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_rngds_state_handler, "A",
    "MCU Optimization state");

/*
 * Enable and restore kernel text write permissions.
 * Callers must ensure that disable_wp()/restore_wp() are executed
 * without rescheduling on the same core.
 */
bool
disable_wp(void)
{
	u_int cr0;

	cr0 = rcr0();
	if ((cr0 & CR0_WP) == 0)
		return (false);
	load_cr0(cr0 & ~CR0_WP);
	return (true);
}

void
restore_wp(bool old_wp)
{

	if (old_wp)
		load_cr0(rcr0() | CR0_WP);
}

bool
acpi_get_fadt_bootflags(uint16_t *flagsp)
{
#ifdef DEV_ACPI
	ACPI_TABLE_FADT *fadt;
	vm_paddr_t physaddr;

	physaddr = acpi_find_table(ACPI_SIG_FADT);
	if (physaddr == 0)
		return (false);
	fadt = acpi_map_table(physaddr, ACPI_SIG_FADT);
	if (fadt == NULL)
		return (false);
	*flagsp = fadt->BootFlags;
	acpi_unmap_table(fadt);
	return (true);
#else
	return (false);
#endif
}