xref: /freebsd/sys/x86/x86/cpu_machdep.c (revision 194bb58b80c184b8230edef0ed7f292b4bf706b0)
1 /*-
2  * Copyright (c) 2003 Peter Wemm.
3  * Copyright (c) 1992 Terrence R. Lambert.
4  * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * William Jolitz.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  */
38 
39 #include <sys/cdefs.h>
40 #include "opt_acpi.h"
41 #include "opt_atpic.h"
42 #include "opt_cpu.h"
43 #include "opt_ddb.h"
44 #include "opt_inet.h"
45 #include "opt_isa.h"
46 #include "opt_kdb.h"
47 #include "opt_kstack_pages.h"
48 #include "opt_maxmem.h"
49 #include "opt_platform.h"
50 #include "opt_sched.h"
51 #ifdef __i386__
52 #include "opt_apic.h"
53 #endif
54 
55 #include <sys/param.h>
56 #include <sys/proc.h>
57 #include <sys/systm.h>
58 #include <sys/bus.h>
59 #include <sys/cpu.h>
60 #include <sys/domainset.h>
61 #include <sys/kdb.h>
62 #include <sys/kernel.h>
63 #include <sys/ktr.h>
64 #include <sys/lock.h>
65 #include <sys/malloc.h>
66 #include <sys/mutex.h>
67 #include <sys/pcpu.h>
68 #include <sys/pmckern.h>
69 #include <sys/rwlock.h>
70 #include <sys/sched.h>
71 #include <sys/smp.h>
72 #include <sys/sysctl.h>
73 
74 #include <machine/clock.h>
75 #include <machine/cpu.h>
76 #include <machine/cpufunc.h>
77 #include <machine/cputypes.h>
78 #include <machine/specialreg.h>
79 #include <machine/md_var.h>
80 #include <machine/trap.h>
81 #include <machine/tss.h>
82 #ifdef SMP
83 #include <machine/smp.h>
84 #endif
85 #ifdef CPU_ELAN
86 #include <machine/elan_mmcr.h>
87 #endif
88 #include <x86/acpica_machdep.h>
89 #include <x86/ifunc.h>
90 
91 #include <vm/vm.h>
92 #include <vm/vm_extern.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_pager.h>
98 #include <vm/vm_param.h>
99 
100 #include <isa/isareg.h>
101 
102 #include <contrib/dev/acpica/include/acpi.h>
103 
104 #define	STATE_RUNNING	0x0
105 #define	STATE_MWAIT	0x1
106 #define	STATE_SLEEPING	0x2
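/*
 * Editorial note (not in the original source): these values track the
 * per-CPU idle state kept in the monitorbuf.  cpu_idle_enter() moves a
 * CPU from STATE_RUNNING to STATE_MWAIT or STATE_SLEEPING;
 * cpu_idle_wakeup() inspects the state to decide whether a plain store
 * to the monitored word suffices to wake the CPU (STATE_MWAIT) or an
 * IPI is required (STATE_SLEEPING).
 */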
107 
108 #ifdef SMP
109 static u_int	cpu_reset_proxyid;
110 static volatile u_int	cpu_reset_proxy_active;
111 #endif
112 
113 char bootmethod[16];
114 SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
115     "System firmware boot method");
116 
117 struct msr_op_arg {
118 	u_int msr;
119 	int op;
120 	uint64_t arg1;
121 	uint64_t *res;
122 };
123 
124 static void
125 x86_msr_op_one(void *argp)
126 {
127 	struct msr_op_arg *a;
128 	uint64_t v;
129 
130 	a = argp;
131 	switch (a->op) {
132 	case MSR_OP_ANDNOT:
133 		v = rdmsr(a->msr);
134 		v &= ~a->arg1;
135 		wrmsr(a->msr, v);
136 		break;
137 	case MSR_OP_OR:
138 		v = rdmsr(a->msr);
139 		v |= a->arg1;
140 		wrmsr(a->msr, v);
141 		break;
142 	case MSR_OP_WRITE:
143 		wrmsr(a->msr, a->arg1);
144 		break;
145 	case MSR_OP_READ:
146 		v = rdmsr(a->msr);
147 		*a->res = v;
148 		break;
149 	}
150 }
151 
152 #define	MSR_OP_EXMODE_MASK	0xf0000000
153 #define	MSR_OP_OP_MASK		0x000000ff
154 #define	MSR_OP_GET_CPUID(x)	(((x) & ~MSR_OP_EXMODE_MASK) >> 8)
155 
156 void
157 x86_msr_op(u_int msr, u_int op, uint64_t arg1, uint64_t *res)
158 {
159 	struct thread *td;
160 	struct msr_op_arg a;
161 	cpuset_t set;
162 	u_int exmode;
163 	int bound_cpu, cpu, i, is_bound;
164 
165 	a.op = op & MSR_OP_OP_MASK;
166 	MPASS(a.op == MSR_OP_ANDNOT || a.op == MSR_OP_OR ||
167 	    a.op == MSR_OP_WRITE || a.op == MSR_OP_READ);
168 	exmode = op & MSR_OP_EXMODE_MASK;
169 	MPASS(exmode == MSR_OP_LOCAL || exmode == MSR_OP_SCHED_ALL ||
170 	    exmode == MSR_OP_SCHED_ONE || exmode == MSR_OP_RENDEZVOUS_ALL ||
171 	    exmode == MSR_OP_RENDEZVOUS_ONE);
172 	a.msr = msr;
173 	a.arg1 = arg1;
174 	a.res = res;
175 	switch (exmode) {
176 	case MSR_OP_LOCAL:
177 		x86_msr_op_one(&a);
178 		break;
179 	case MSR_OP_SCHED_ALL:
180 		td = curthread;
181 		thread_lock(td);
182 		is_bound = sched_is_bound(td);
183 		bound_cpu = td->td_oncpu;
184 		CPU_FOREACH(i) {
185 			sched_bind(td, i);
186 			x86_msr_op_one(&a);
187 		}
188 		if (is_bound)
189 			sched_bind(td, bound_cpu);
190 		else
191 			sched_unbind(td);
192 		thread_unlock(td);
193 		break;
194 	case MSR_OP_SCHED_ONE:
195 		td = curthread;
196 		cpu = MSR_OP_GET_CPUID(op);
197 		thread_lock(td);
198 		is_bound = sched_is_bound(td);
199 		bound_cpu = td->td_oncpu;
200 		if (!is_bound || bound_cpu != cpu)
201 			sched_bind(td, cpu);
202 		x86_msr_op_one(&a);
203 		if (is_bound) {
204 			if (bound_cpu != cpu)
205 				sched_bind(td, bound_cpu);
206 		} else {
207 			sched_unbind(td);
208 		}
209 		thread_unlock(td);
210 		break;
211 	case MSR_OP_RENDEZVOUS_ALL:
212 		smp_rendezvous(smp_no_rendezvous_barrier, x86_msr_op_one,
213 		    smp_no_rendezvous_barrier, &a);
214 		break;
215 	case MSR_OP_RENDEZVOUS_ONE:
216 		cpu = MSR_OP_GET_CPUID(op);
217 		CPU_SETOF(cpu, &set);
218 		smp_rendezvous_cpus(set, smp_no_rendezvous_barrier,
219 		    x86_msr_op_one, smp_no_rendezvous_barrier, &a);
220 		break;
221 	}
222 }
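/*
 * Editorial sketch (not part of the original file): reading an MSR on a
 * specific CPU.  The (cpu << 8) encoding of the target CPU id is
 * inferred from MSR_OP_GET_CPUID() above; the header may provide a
 * dedicated encoder macro for it instead.
 */
static inline uint64_t
example_read_spec_ctrl_on(u_int cpu)
{
	uint64_t v;

	/* Rendezvous on 'cpu' and read MSR_IA32_SPEC_CTRL there. */
	x86_msr_op(MSR_IA32_SPEC_CTRL,
	    MSR_OP_RENDEZVOUS_ONE | MSR_OP_READ | (cpu << 8), 0, &v);
	return (v);
}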
223 
224 /*
225  * Automatically initialized per CPU errata in cpu_idle_tun below.
226  */
227 bool mwait_cpustop_broken = false;
228 SYSCTL_BOOL(_machdep, OID_AUTO, mwait_cpustop_broken, CTLFLAG_RDTUN,
229     &mwait_cpustop_broken, 0,
230     "Cannot reliably wake MONITOR/MWAIT CPUs without interrupts");
231 
232 /*
233  * Flush the D-cache for non-DMA I/O so that the I-cache can
234  * be made coherent later.
235  */
236 void
237 cpu_flush_dcache(void *ptr, size_t len)
238 {
239 	/* Not applicable */
240 }
241 
242 void
243 acpi_cpu_c1(void)
244 {
245 
246 	__asm __volatile("sti; hlt");
247 }
248 
249 /*
250  * Use mwait to pause execution while waiting for an interrupt or
251  * another thread to signal that there is more work.
252  *
253  * NOTE: Interrupts will cause a wakeup; however, this function does
254  * not enable interrupt handling. The caller is responsible to enable
255  * interrupts.
256  */
257 void
258 acpi_cpu_idle_mwait(uint32_t mwait_hint)
259 {
260 	int *state;
261 	uint64_t v;
262 
263 	/*
264 	 * A comment in a Linux patch claims that 'CPUs run faster with
265 	 * speculation protection disabled. All CPU threads in a core
266 	 * must disable speculation protection for it to be
267 	 * disabled. Disable it while we are idle so the other
268 	 * hyperthread can run fast.'
269 	 *
270 	 * XXXKIB.  Software coordination mode should be supported,
271 	 * but all Intel CPUs provide hardware coordination.
272 	 */
273 
274 	state = &PCPU_PTR(monitorbuf)->idle_state;
275 	KASSERT(atomic_load_int(state) == STATE_SLEEPING,
276 	    ("acpi_cpu_idle_mwait: wrong monitorbuf state"));
277 	atomic_store_int(state, STATE_MWAIT);
278 	if (PCPU_GET(ibpb_set) || hw_ssb_active) {
279 		v = rdmsr(MSR_IA32_SPEC_CTRL);
280 		wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
281 		    IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
282 	} else {
283 		v = 0;
284 	}
285 	cpu_monitor(state, 0, 0);
286 	if (atomic_load_int(state) == STATE_MWAIT)
287 		cpu_mwait(MWAIT_INTRBREAK, mwait_hint);
288 
289 	/*
290 	 * SSB cannot be disabled while we sleep, or rather, if it was
291 	 * disabled, the sysctl thread will bind to our CPU to tweak the
292 	 * MSR.
293 	 */
294 	if (v != 0)
295 		wrmsr(MSR_IA32_SPEC_CTRL, v);
296 
297 	/*
298 	 * We should exit on any event that interrupts mwait, because
299 	 * that event might be a wanted interrupt.
300 	 */
301 	atomic_store_int(state, STATE_RUNNING);
302 }
303 
304 /* Get current clock frequency for the given cpu id. */
305 int
306 cpu_est_clockrate(int cpu_id, uint64_t *rate)
307 {
308 	uint64_t tsc1, tsc2;
309 	uint64_t acnt, mcnt, perf;
310 	register_t reg;
311 
312 	if (pcpu_find(cpu_id) == NULL || rate == NULL)
313 		return (EINVAL);
314 #ifdef __i386__
315 	if ((cpu_feature & CPUID_TSC) == 0)
316 		return (EOPNOTSUPP);
317 #endif
318 
319 	/*
320 	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
321 	 * DELAY(9) based logic fails.
322 	 */
323 	if (tsc_is_invariant && !tsc_perf_stat)
324 		return (EOPNOTSUPP);
325 
326 #ifdef SMP
327 	if (smp_cpus > 1) {
328 		/* Schedule ourselves on the indicated cpu. */
329 		thread_lock(curthread);
330 		sched_bind(curthread, cpu_id);
331 		thread_unlock(curthread);
332 	}
333 #endif
334 
335 	/* Calibrate by measuring a short delay. */
336 	reg = intr_disable();
337 	if (tsc_is_invariant) {
338 		wrmsr(MSR_MPERF, 0);
339 		wrmsr(MSR_APERF, 0);
340 		tsc1 = rdtsc();
341 		DELAY(1000);
342 		mcnt = rdmsr(MSR_MPERF);
343 		acnt = rdmsr(MSR_APERF);
344 		tsc2 = rdtsc();
345 		intr_restore(reg);
346 		perf = 1000 * acnt / mcnt;
347 		*rate = (tsc2 - tsc1) * perf;
348 	} else {
349 		tsc1 = rdtsc();
350 		DELAY(1000);
351 		tsc2 = rdtsc();
352 		intr_restore(reg);
353 		*rate = (tsc2 - tsc1) * 1000;
354 	}
355 
356 #ifdef SMP
357 	if (smp_cpus > 1) {
358 		thread_lock(curthread);
359 		sched_unbind(curthread);
360 		thread_unlock(curthread);
361 	}
362 #endif
363 
364 	return (0);
365 }
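/*
 * Editorial worked example (not in the original source): with an
 * invariant 3.0 GHz TSC and the core actually running at 1.5 GHz, the
 * 1 ms sample above yields tsc2 - tsc1 ~= 3,000,000 and
 * acnt / mcnt = 0.5, so perf = 1000 * acnt / mcnt = 500 and
 * *rate = 3,000,000 * 500 = 1,500,000,000 Hz, the true core frequency.
 */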
366 
367 /*
368  * Shutdown the CPU as much as possible
369  */
370 void
371 cpu_halt(void)
372 {
373 	for (;;)
374 		halt();
375 }
376 
377 static void
378 cpu_reset_real(void)
379 {
380 	struct region_descriptor null_idt;
381 	int b;
382 
383 	disable_intr();
384 #ifdef CPU_ELAN
385 	if (elan_mmcr != NULL)
386 		elan_mmcr->RESCFG = 1;
387 #endif
388 #ifdef __i386__
389 	if (cpu == CPU_GEODE1100) {
390 		/* Attempt Geode's own reset */
391 		outl(0xcf8, 0x80009044ul);
392 		outl(0xcfc, 0xf);
393 	}
394 #endif
395 #if !defined(BROKEN_KEYBOARD_RESET)
396 	/*
397 	 * Attempt to do a CPU reset via the keyboard controller, but
398 	 * do not turn off GateA20, as any machine that fails
399 	 * to do the reset here would then end up in no man's land.
400 	 */
401 	outb(IO_KBD + 4, 0xFE);
402 	DELAY(500000);	/* wait 0.5 sec to see if that did it */
403 #endif
404 
405 	/*
406 	 * Attempt to force a reset via the Reset Control register at
407 	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
408 	 * transitions from 0 to 1.  Bit 1 selects the type of reset
409 	 * to attempt: 0 selects a "soft" reset, and 1 selects a
410 	 * "hard" reset.  We try a "hard" reset.  The first write sets
411 	 * bit 1 to select a "hard" reset and clears bit 2.  The
412 	 * second write forces a 0 -> 1 transition in bit 2 to trigger
413 	 * a reset.
414 	 */
415 	outb(0xcf9, 0x2);
416 	outb(0xcf9, 0x6);
417 	DELAY(500000);  /* wait 0.5 sec to see if that did it */
418 
419 	/*
420 	 * Attempt to force a reset via the Fast A20 and Init register
421 	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
422 	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
423 	 * preserve bit 1 while setting bit 0.  We also must clear bit
424 	 * 0 before setting it if it isn't already clear.
425 	 */
426 	b = inb(0x92);
427 	if (b != 0xff) {
428 		if ((b & 0x1) != 0)
429 			outb(0x92, b & 0xfe);
430 		outb(0x92, b | 0x1);
431 		DELAY(500000);  /* wait 0.5 sec to see if that did it */
432 	}
433 
434 	printf("No known reset method worked, attempting CPU shutdown\n");
435 	DELAY(1000000); /* wait 1 sec for printf to complete */
436 
437 	/* Wipe the IDT. */
438 	null_idt.rd_limit = 0;
439 	null_idt.rd_base = 0;
440 	lidt(&null_idt);
441 
442 	/* "good night, sweet prince .... <THUNK!>" */
443 	breakpoint();
444 
445 	/* NOTREACHED */
446 	while (1);
447 }
448 
449 #ifdef SMP
450 static void
451 cpu_reset_proxy(void)
452 {
453 
454 	cpu_reset_proxy_active = 1;
455 	while (cpu_reset_proxy_active == 1)
456 		ia32_pause(); /* Wait for other cpu to see that we've started */
457 
458 	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
459 	DELAY(1000000);
460 	cpu_reset_real();
461 }
462 #endif
463 
464 void
465 cpu_reset(void)
466 {
467 #ifdef SMP
468 	struct monitorbuf *mb;
469 	cpuset_t map;
470 	u_int cnt;
471 
472 	if (smp_started) {
473 		map = all_cpus;
474 		CPU_CLR(PCPU_GET(cpuid), &map);
475 		CPU_ANDNOT(&map, &map, &stopped_cpus);
476 		if (!CPU_EMPTY(&map)) {
477 			printf("cpu_reset: Stopping other CPUs\n");
478 			stop_cpus(map);
479 		}
480 
481 		if (PCPU_GET(cpuid) != 0) {
482 			cpu_reset_proxyid = PCPU_GET(cpuid);
483 			cpustop_restartfunc = cpu_reset_proxy;
484 			cpu_reset_proxy_active = 0;
485 			printf("cpu_reset: Restarting BSP\n");
486 
487 			/* Restart CPU #0. */
488 			CPU_SETOF(0, &started_cpus);
489 			mb = &pcpu_find(0)->pc_monitorbuf;
490 			atomic_store_int(&mb->stop_state,
491 			    MONITOR_STOPSTATE_RUNNING);
492 
493 			cnt = 0;
494 			while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
495 				ia32_pause();
496 				cnt++;	/* Wait for BSP to announce restart */
497 			}
498 			if (cpu_reset_proxy_active == 0) {
499 				printf("cpu_reset: Failed to restart BSP\n");
500 			} else {
501 				cpu_reset_proxy_active = 2;
502 				while (1)
503 					ia32_pause();
504 				/* NOTREACHED */
505 			}
506 		}
507 	}
508 #endif
509 	cpu_reset_real();
510 	/* NOTREACHED */
511 }
512 
513 bool
514 cpu_mwait_usable(void)
515 {
516 
517 	return ((cpu_feature2 & CPUID2_MON) != 0 && ((cpu_mon_mwait_flags &
518 	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)) ==
519 	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)));
520 }
521 
522 void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */
523 
524 int cpu_amdc1e_bug = 0;			/* AMD C1E APIC workaround required. */
525 
526 static int	idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
527 SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
528     0, "Use MONITOR/MWAIT for short idle");
529 
530 static bool
531 cpu_idle_enter(int *statep, int newstate)
532 {
533 	KASSERT(atomic_load_int(statep) == STATE_RUNNING,
534 	    ("%s: state %d", __func__, atomic_load_int(statep)));
535 
536 	/*
537 	 * A fence is needed to prevent reordering of the load in
538 	 * sched_runnable() with this store to the idle state word.  Without it,
539 	 * cpu_idle_wakeup() can observe the state as STATE_RUNNING after having
540 	 * added load to the queue, and elide an IPI.  Then, sched_runnable()
541 	 * can observe tdq_load == 0, so the CPU ends up idling with pending
542 	 * work.  tdq_notify() similarly ensures that a prior update to tdq_load
543 	 * is visible before calling cpu_idle_wakeup().
544 	 */
545 	atomic_store_int(statep, newstate);
546 #if defined(SCHED_ULE) && defined(SMP)
547 	atomic_thread_fence_seq_cst();
548 #endif
549 
550 	/*
551 	 * Since we may be in a critical section from cpu_idle(), if
552 	 * an interrupt fires during that critical section we may have
553 	 * a pending preemption.  If the CPU halts, then that thread
554 	 * may not execute until a later interrupt awakens the CPU.
555 	 * To handle this race, check for a runnable thread after
556 	 * disabling interrupts and immediately return if one is
557 	 * found.  Also, we must absolutely guarantee that hlt is
558 	 * the next instruction after sti.  This ensures that any
559 	 * interrupt that fires after the call to disable_intr() will
560 	 * immediately awaken the CPU from hlt.  Finally, note that this
561 	 * works on x86 because sti inhibits interrupt delivery until
562 	 * after the following instruction has executed, while IF is set
563 	 * to 1 immediately, allowing the hlt instruction to acknowledge
564 	 * the interrupt.
565 	 */
566 	disable_intr();
567 	if (sched_runnable()) {
568 		enable_intr();
569 		atomic_store_int(statep, STATE_RUNNING);
570 		return (false);
571 	} else {
572 		return (true);
573 	}
574 }
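/*
 * Editorial sketch of the race the fence above prevents (not in the
 * original source):
 *
 *	idle CPU			waking CPU
 *	--------			----------
 *	store idle_state = SLEEPING
 *	load tdq_load (sees 0)		store tdq_load = 1
 *					load idle_state (sees stale RUNNING)
 *					elides the IPI
 *	hlt				...
 *
 * With seq_cst fences between each store/load pair on both sides, at
 * least one of the two loads observes the other side's store, so either
 * the idle CPU sees the pending work or the waking CPU sends the IPI.
 */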
575 
576 static void
577 cpu_idle_exit(int *statep)
578 {
579 	atomic_store_int(statep, STATE_RUNNING);
580 }
581 
582 static void
583 cpu_idle_acpi(sbintime_t sbt)
584 {
585 	int *state;
586 
587 	state = &PCPU_PTR(monitorbuf)->idle_state;
588 	if (cpu_idle_enter(state, STATE_SLEEPING)) {
589 		if (cpu_idle_hook)
590 			cpu_idle_hook(sbt);
591 		else
592 			acpi_cpu_c1();
593 		cpu_idle_exit(state);
594 	}
595 }
596 
597 static void
598 cpu_idle_hlt(sbintime_t sbt)
599 {
600 	int *state;
601 
602 	state = &PCPU_PTR(monitorbuf)->idle_state;
603 	if (cpu_idle_enter(state, STATE_SLEEPING)) {
604 		acpi_cpu_c1();
605 		atomic_store_int(state, STATE_RUNNING);
606 	}
607 }
608 
609 static void
610 cpu_idle_mwait(sbintime_t sbt)
611 {
612 	int *state;
613 
614 	state = &PCPU_PTR(monitorbuf)->idle_state;
615 	if (cpu_idle_enter(state, STATE_MWAIT)) {
616 		cpu_monitor(state, 0, 0);
617 		if (atomic_load_int(state) == STATE_MWAIT)
618 			__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
619 		else
620 			enable_intr();
621 		cpu_idle_exit(state);
622 	}
623 }
624 
625 static void
626 cpu_idle_spin(sbintime_t sbt)
627 {
628 	int *state;
629 	int i;
630 
631 	state = &PCPU_PTR(monitorbuf)->idle_state;
632 	atomic_store_int(state, STATE_RUNNING);
633 
634 	/*
635 	 * The sched_runnable() call is racy, but since it sits in a
636 	 * loop, missing a wakeup once has little impact, if any (and it
637 	 * is much better than not checking at all).
638 	 */
639 	for (i = 0; i < 1000; i++) {
640 		if (sched_runnable())
641 			return;
642 		cpu_spinwait();
643 	}
644 }
645 
646 void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;
647 
648 void
649 cpu_idle(int busy)
650 {
651 	uint64_t msr;
652 	sbintime_t sbt = -1;
653 
654 	CTR1(KTR_SPARE2, "cpu_idle(%d)", busy);
655 
656 	/* If we are busy - try to use fast methods. */
657 	if (busy) {
658 		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
659 			cpu_idle_mwait(busy);
660 			goto out;
661 		}
662 	}
663 
664 	/* If we have time - switch timers into idle mode. */
665 	if (!busy) {
666 		critical_enter();
667 		sbt = cpu_idleclock();
668 	}
669 
670 	/* Apply AMD APIC timer C1E workaround. */
671 	if (cpu_amdc1e_bug && cpu_disable_c3_sleep) {
672 		msr = rdmsr(MSR_AMDK8_IPM);
673 		if ((msr & (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)) != 0)
674 			wrmsr(MSR_AMDK8_IPM, msr & ~(AMDK8_SMIONCMPHALT |
675 			    AMDK8_C1EONCMPHALT));
676 	}
677 
678 	/* Call main idle method. */
679 	cpu_idle_fn(sbt);
680 
681 	/* Switch timers back into active mode. */
682 	if (!busy) {
683 		cpu_activeclock();
684 		critical_exit();
685 	}
686 out:
687 	CTR1(KTR_SPARE2, "cpu_idle(%d) done", busy);
688 }
689 
690 static int cpu_idle_apl31_workaround;
691 SYSCTL_INT(_machdep, OID_AUTO, idle_apl31, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
692     &cpu_idle_apl31_workaround, 0,
693     "Apollo Lake APL31 MWAIT bug workaround");
694 
695 int
696 cpu_idle_wakeup(int cpu)
697 {
698 	struct monitorbuf *mb;
699 	int *state;
700 
701 	mb = &pcpu_find(cpu)->pc_monitorbuf;
702 	state = &mb->idle_state;
703 	switch (atomic_load_int(state)) {
704 	case STATE_SLEEPING:
705 		return (0);
706 	case STATE_MWAIT:
707 		atomic_store_int(state, STATE_RUNNING);
708 		return (cpu_idle_apl31_workaround ? 0 : 1);
709 	case STATE_RUNNING:
710 		return (1);
711 	default:
712 		panic("bad monitor state");
713 		return (1);
714 	}
715 }
716 
717 /*
718  * Ordered by speed/power consumption.
719  */
720 static const struct {
721 	void	*id_fn;
722 	const char *id_name;
723 	int	id_cpuid2_flag;
724 } idle_tbl[] = {
725 	{ .id_fn = cpu_idle_spin, .id_name = "spin" },
726 	{ .id_fn = cpu_idle_mwait, .id_name = "mwait",
727 	    .id_cpuid2_flag = CPUID2_MON },
728 	{ .id_fn = cpu_idle_hlt, .id_name = "hlt" },
729 	{ .id_fn = cpu_idle_acpi, .id_name = "acpi" },
730 };
731 
732 static int
733 idle_sysctl_available(SYSCTL_HANDLER_ARGS)
734 {
735 	char *avail, *p;
736 	int error;
737 	int i;
738 
739 	avail = malloc(256, M_TEMP, M_WAITOK);
740 	p = avail;
741 	for (i = 0; i < nitems(idle_tbl); i++) {
742 		if (idle_tbl[i].id_cpuid2_flag != 0 &&
743 		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
744 			continue;
745 		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
746 		    cpu_idle_hook == NULL)
747 			continue;
748 		p += sprintf(p, "%s%s", p != avail ? ", " : "",
749 		    idle_tbl[i].id_name);
750 	}
751 	error = sysctl_handle_string(oidp, avail, 0, req);
752 	free(avail, M_TEMP);
753 	return (error);
754 }
755 
756 SYSCTL_PROC(_machdep, OID_AUTO, idle_available,
757     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
758     0, 0, idle_sysctl_available, "A",
759     "list of available idle functions");
760 
761 static bool
762 cpu_idle_selector(const char *new_idle_name)
763 {
764 	int i;
765 
766 	for (i = 0; i < nitems(idle_tbl); i++) {
767 		if (idle_tbl[i].id_cpuid2_flag != 0 &&
768 		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
769 			continue;
770 		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
771 		    cpu_idle_hook == NULL)
772 			continue;
773 		if (strcmp(idle_tbl[i].id_name, new_idle_name))
774 			continue;
775 		cpu_idle_fn = idle_tbl[i].id_fn;
776 		if (bootverbose)
777 			printf("CPU idle set to %s\n", idle_tbl[i].id_name);
778 		return (true);
779 	}
780 	return (false);
781 }
782 
783 static int
784 cpu_idle_sysctl(SYSCTL_HANDLER_ARGS)
785 {
786 	char buf[16];
787 	const char *p;
788 	int error, i;
789 
790 	p = "unknown";
791 	for (i = 0; i < nitems(idle_tbl); i++) {
792 		if (idle_tbl[i].id_fn == cpu_idle_fn) {
793 			p = idle_tbl[i].id_name;
794 			break;
795 		}
796 	}
797 	strncpy(buf, p, sizeof(buf));
798 	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
799 	if (error != 0 || req->newptr == NULL)
800 		return (error);
801 	return (cpu_idle_selector(buf) ? 0 : EINVAL);
802 }
803 
804 SYSCTL_PROC(_machdep, OID_AUTO, idle,
805     CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE,
806     0, 0, cpu_idle_sysctl, "A",
807     "currently selected idle function");
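/*
 * Editorial usage note (sketch): the idle method can be chosen at boot,
 * e.g. machdep.idle="hlt" in loader.conf, or at runtime with
 * 'sysctl machdep.idle=hlt'; 'sysctl machdep.idle_available' lists the
 * methods usable on the current CPU, e.g. "spin, mwait, hlt, acpi".
 */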
808 
809 static void
810 cpu_idle_tun(void *unused __unused)
811 {
812 	char tunvar[16];
813 
814 	if (TUNABLE_STR_FETCH("machdep.idle", tunvar, sizeof(tunvar)))
815 		cpu_idle_selector(tunvar);
816 	else if (cpu_vendor_id == CPU_VENDOR_AMD &&
817 	    CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1) {
818 		/* Ryzen errata 1057, 1109. */
819 		cpu_idle_selector("hlt");
820 		idle_mwait = 0;
821 		mwait_cpustop_broken = true;
822 	}
823 
824 	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
825 	    CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) == 0x5c) {
826 		/*
827 		 * Apollo Lake erratum APL31 (public erratum APL30):
828 		 * stores to the armed address range may not trigger
829 		 * MWAIT to resume execution.  The OS needs to use
830 		 * interrupts to wake processors from MWAIT-induced
831 		 * sleep states.
832 		 */
833 		cpu_idle_apl31_workaround = 1;
834 		mwait_cpustop_broken = true;
835 	}
836 	TUNABLE_INT_FETCH("machdep.idle_apl31", &cpu_idle_apl31_workaround);
837 }
838 SYSINIT(cpu_idle_tun, SI_SUB_CPU, SI_ORDER_MIDDLE, cpu_idle_tun, NULL);
839 
840 static int panic_on_nmi = 0xff;
841 SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,
842     &panic_on_nmi, 0,
843     "Panic on NMI: 1 = H/W failure; 2 = unknown; 0xff = all");
844 int nmi_is_broadcast = 1;
845 SYSCTL_INT(_machdep, OID_AUTO, nmi_is_broadcast, CTLFLAG_RWTUN,
846     &nmi_is_broadcast, 0,
847     "Chipset NMI is broadcast");
848 int (*apei_nmi)(void);
849 
850 void
851 nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame)
852 {
853 	bool claimed = false;
854 
855 #ifdef DEV_ISA
856 	/* machine/parity/power fail/"kitchen sink" faults */
857 	if (isa_nmi(frame->tf_err)) {
858 		claimed = true;
859 		if ((panic_on_nmi & 1) != 0)
860 			panic("NMI indicates hardware failure");
861 	}
862 #endif /* DEV_ISA */
863 
864 	/* ACPI Platform Error Interfaces callback. */
865 	if (apei_nmi != NULL && (*apei_nmi)())
866 		claimed = true;
867 
868 	/*
869 	 * NMIs can be useful for debugging.  They can be hooked up to a
870 	 * pushbutton, usually on an ISA, PCI, or PCIe card.  They can also be
871 	 * generated by an IPMI BMC, either manually or in response to a
872 	 * watchdog timeout.  For example, see the "power diag" command in
873 	 * ports/sysutils/ipmitool.  They can also be generated by a
874 	 * hypervisor; see "bhyvectl --inject-nmi".
875 	 */
876 
877 #ifdef KDB
878 	if (!claimed && (panic_on_nmi & 2) != 0) {
879 		if (debugger_on_panic) {
880 			printf("NMI/cpu%d ... going to debugger\n", cpu);
881 			claimed = kdb_trap(type, 0, frame);
882 		}
883 	}
884 #endif /* KDB */
885 
886 	if (!claimed && panic_on_nmi != 0)
887 		panic("NMI");
888 }
889 
890 /*
891  * Dynamically registered NMI handlers.
892  */
893 struct nmi_handler {
894 	int running;
895 	int (*func)(struct trapframe *);
896 	struct nmi_handler *next;
897 };
898 static struct nmi_handler *nmi_handlers_head = NULL;
899 MALLOC_DEFINE(M_NMI, "NMI handlers",
900     "List entries for dynamically registered NMI handlers");
901 
902 void
903 nmi_register_handler(int (*handler)(struct trapframe *))
904 {
905 	struct nmi_handler *hp;
906 	int (*hpf)(struct trapframe *);
907 
908 	hp = (struct nmi_handler *)atomic_load_acq_ptr(
909 	    (uintptr_t *)&nmi_handlers_head);
910 	while (hp != NULL) {
911 		hpf = hp->func;
912 		MPASS(hpf != handler);
913 		if (hpf == NULL &&
914 		    atomic_cmpset_ptr((volatile uintptr_t *)&hp->func,
915 		    (uintptr_t)NULL, (uintptr_t)handler) != 0) {
916 			hp->running = 0;
917 			return;
918 		}
919 		hp = (struct nmi_handler *)atomic_load_acq_ptr(
920 		    (uintptr_t *)&hp->next);
921 	}
922 	hp = malloc(sizeof(struct nmi_handler), M_NMI, M_WAITOK | M_ZERO);
923 	hp->func = handler;
924 	hp->next = nmi_handlers_head;
925 	while (atomic_fcmpset_rel_ptr(
926 	    (volatile uintptr_t *)&nmi_handlers_head,
927 	    (uintptr_t *)&hp->next, (uintptr_t)hp) == 0)
928 		;
929 }
930 
931 void
932 nmi_remove_handler(int (*handler)(struct trapframe *))
933 {
934 	struct nmi_handler *hp;
935 
936 	hp = (struct nmi_handler *)atomic_load_acq_ptr(
937 	    (uintptr_t *)&nmi_handlers_head);
938 	while (hp != NULL) {
939 		if (hp->func == handler) {
940 			hp->func = NULL;
941 			/* Wait for the handler to exit before returning. */
942 			while (atomic_load_int(&hp->running) != 0)
943 				cpu_spinwait();
944 			return;
945 		}
946 		hp = (struct nmi_handler *)atomic_load_acq_ptr(
947 		    (uintptr_t *)&hp->next);
948 	}
949 
950 	panic("%s: attempting to remove an unregistered NMI handler %p\n",
951 	    __func__, handler);
952 }
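/*
 * Editorial sketch (hypothetical consumer, not in this file): a minimal
 * dynamically registered NMI handler.  nmi_handle_intr() below calls it
 * with the trapframe and treats a nonzero return as "NMI claimed",
 * stopping further dispatch.
 */
static int
example_nmi_handler(struct trapframe *frame __unused)
{

	/* Check (hypothetical) device state; claim the NMI if it is ours. */
	return (0);	/* Not ours; let other handlers and kdb run. */
}

static void
example_nmi_attach(void)
{

	nmi_register_handler(example_nmi_handler);
}

static void
example_nmi_detach(void)
{

	/* Blocks until no CPU is still executing the handler. */
	nmi_remove_handler(example_nmi_handler);
}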
953 
954 void
955 nmi_handle_intr(struct trapframe *frame)
956 {
957 	int (*func)(struct trapframe *);
958 	struct nmi_handler *hp;
959 	int rv;
960 	bool handled;
961 
962 #ifdef SMP
963 	/* Handler for NMI IPIs used for stopping CPUs. */
964 	if (ipi_nmi_handler() == 0)
965 		return;
966 #endif
967 	handled = false;
968 	hp = (struct nmi_handler *)atomic_load_acq_ptr(
969 	    (uintptr_t *)&nmi_handlers_head);
970 	while (!handled && hp != NULL) {
971 		func = hp->func;
972 		if (func != NULL) {
973 			atomic_add_int(&hp->running, 1);
974 			rv = func(frame);
975 			atomic_subtract_int(&hp->running, 1);
976 			if (rv != 0) {
977 				handled = true;
978 				break;
979 			}
980 		}
981 		hp = (struct nmi_handler *)atomic_load_acq_ptr(
982 		    (uintptr_t *)&hp->next);
983 	}
984 	if (handled)
985 		return;
986 #ifdef SMP
987 	if (nmi_is_broadcast) {
988 		nmi_call_kdb_smp(T_NMI, frame);
989 		return;
990 	}
991 #endif
992 	nmi_call_kdb(PCPU_GET(cpuid), T_NMI, frame);
993 }
994 
995 static int hw_ibrs_active;
996 int hw_ibrs_ibpb_active;
997 int hw_ibrs_disable = 1;
998 
999 SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
1000     "Indirect Branch Restricted Speculation active");
1001 
1002 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ibrs,
1003     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1004     "Indirect Branch Restricted Speculation active");
1005 
1006 SYSCTL_INT(_machdep_mitigations_ibrs, OID_AUTO, active, CTLFLAG_RD,
1007     &hw_ibrs_active, 0, "Indirect Branch Restricted Speculation active");
1008 
1009 void
1010 hw_ibrs_recalculate(bool for_all_cpus)
1011 {
1012 	if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
1013 		x86_msr_op(MSR_IA32_SPEC_CTRL, (for_all_cpus ?
1014 		    MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL) |
1015 		    (hw_ibrs_disable != 0 ? MSR_OP_ANDNOT : MSR_OP_OR),
1016 		    IA32_SPEC_CTRL_IBRS, NULL);
1017 		hw_ibrs_active = hw_ibrs_disable == 0;
1018 		hw_ibrs_ibpb_active = 0;
1019 	} else {
1020 		hw_ibrs_active = hw_ibrs_ibpb_active = (cpu_stdext_feature3 &
1021 		    CPUID_STDEXT3_IBPB) != 0 && !hw_ibrs_disable;
1022 	}
1023 }
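/*
 * Editorial note (sketch): on CPUs advertising IA32_ARCH_CAP_IBRS_ALL,
 * IBRS is an always-on mode toggled once in MSR_IA32_SPEC_CTRL above;
 * without it, the mitigation falls back to issuing IBPB barriers
 * (hw_ibrs_ibpb_active), which other parts of the kernel apply when
 * switching between untrusted contexts.
 */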
1024 
1025 static int
1026 hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)
1027 {
1028 	int error, val;
1029 
1030 	val = hw_ibrs_disable;
1031 	error = sysctl_handle_int(oidp, &val, 0, req);
1032 	if (error != 0 || req->newptr == NULL)
1033 		return (error);
1034 	hw_ibrs_disable = val != 0;
1035 	hw_ibrs_recalculate(true);
1036 	return (0);
1037 }
1038 SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
1039     CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
1040     "Disable Indirect Branch Restricted Speculation");
1041 
1042 SYSCTL_PROC(_machdep_mitigations_ibrs, OID_AUTO, disable, CTLTYPE_INT |
1043     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1044     hw_ibrs_disable_handler, "I",
1045     "Disable Indirect Branch Restricted Speculation");
1046 
1047 int hw_ssb_active;
1048 int hw_ssb_disable;
1049 
1050 SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
1051     &hw_ssb_active, 0,
1052     "Speculative Store Bypass Disable active");
1053 
1054 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ssb,
1055     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1056     "Speculative Store Bypass Disable active");
1057 
1058 SYSCTL_INT(_machdep_mitigations_ssb, OID_AUTO, active, CTLFLAG_RD,
1059     &hw_ssb_active, 0, "Speculative Store Bypass Disable active");
1060 
1061 static void
1062 hw_ssb_set(bool enable, bool for_all_cpus)
1063 {
1064 
1065 	if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
1066 		hw_ssb_active = 0;
1067 		return;
1068 	}
1069 	hw_ssb_active = enable;
1070 	x86_msr_op(MSR_IA32_SPEC_CTRL,
1071 	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
1072 	    (for_all_cpus ? MSR_OP_SCHED_ALL : MSR_OP_LOCAL),
1073 	    IA32_SPEC_CTRL_SSBD, NULL);
1074 }
1075 
1076 void
1077 hw_ssb_recalculate(bool all_cpus)
1078 {
1079 
1080 	switch (hw_ssb_disable) {
1081 	default:
1082 		hw_ssb_disable = 0;
1083 		/* FALLTHROUGH */
1084 	case 0: /* off */
1085 		hw_ssb_set(false, all_cpus);
1086 		break;
1087 	case 1: /* on */
1088 		hw_ssb_set(true, all_cpus);
1089 		break;
1090 	case 2: /* auto */
1091 		hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSB_NO) == 0,
1092 		    all_cpus);
1093 		break;
1094 	}
1095 }
1096 
1097 static int
1098 hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)
1099 {
1100 	int error, val;
1101 
1102 	val = hw_ssb_disable;
1103 	error = sysctl_handle_int(oidp, &val, 0, req);
1104 	if (error != 0 || req->newptr == NULL)
1105 		return (error);
1106 	hw_ssb_disable = val;
1107 	hw_ssb_recalculate(true);
1108 	return (0);
1109 }
1110 SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
1111     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1112     hw_ssb_disable_handler, "I",
1113     "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
1114 
1115 SYSCTL_PROC(_machdep_mitigations_ssb, OID_AUTO, disable, CTLTYPE_INT |
1116     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1117     hw_ssb_disable_handler, "I",
1118     "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
1119 
1120 int hw_mds_disable;
1121 
1122 /*
1123  * Handler for Microarchitectural Data Sampling issues.  Really not a
1124  * pointer to a C function: on amd64 the code must not change any CPU
1125  * architectural state except possibly %rflags. Also, it is always
1126  * called with interrupts disabled.
1127  */
1128 void mds_handler_void(void);
1129 void mds_handler_verw(void);
1130 void mds_handler_ivb(void);
1131 void mds_handler_bdw(void);
1132 void mds_handler_skl_sse(void);
1133 void mds_handler_skl_avx(void);
1134 void mds_handler_skl_avx512(void);
1135 void mds_handler_silvermont(void);
1136 void (*mds_handler)(void) = mds_handler_void;
1137 
1138 static int
1139 sysctl_hw_mds_disable_state_handler(SYSCTL_HANDLER_ARGS)
1140 {
1141 	const char *state;
1142 
1143 	if (mds_handler == mds_handler_void)
1144 		state = "inactive";
1145 	else if (mds_handler == mds_handler_verw)
1146 		state = "VERW";
1147 	else if (mds_handler == mds_handler_ivb)
1148 		state = "software IvyBridge";
1149 	else if (mds_handler == mds_handler_bdw)
1150 		state = "software Broadwell";
1151 	else if (mds_handler == mds_handler_skl_sse)
1152 		state = "software Skylake SSE";
1153 	else if (mds_handler == mds_handler_skl_avx)
1154 		state = "software Skylake AVX";
1155 	else if (mds_handler == mds_handler_skl_avx512)
1156 		state = "software Skylake AVX512";
1157 	else if (mds_handler == mds_handler_silvermont)
1158 		state = "software Silvermont";
1159 	else
1160 		state = "unknown";
1161 	return (SYSCTL_OUT(req, state, strlen(state)));
1162 }
1163 
1164 SYSCTL_PROC(_hw, OID_AUTO, mds_disable_state,
1165     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1166     sysctl_hw_mds_disable_state_handler, "A",
1167     "Microarchitectural Data Sampling Mitigation state");
1168 
1169 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, mds,
1170     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1171     "Microarchitectural Data Sampling Mitigation state");
1172 
1173 SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, state,
1174     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1175     sysctl_hw_mds_disable_state_handler, "A",
1176     "Microarchitectural Data Sampling Mitigation state");
1177 
1178 _Static_assert(__offsetof(struct pcpu, pc_mds_tmp) % 64 == 0, "MDS AVX512");
1179 
1180 void
1181 hw_mds_recalculate(void)
1182 {
1183 	struct pcpu *pc;
1184 	vm_offset_t b64;
1185 	u_long xcr0;
1186 	int i;
1187 
1188 	/*
1189 	 * Allow the user to force the VERW variant even if MD_CLEAR is
1190 	 * not reported.  For instance, a hypervisor might unknowingly
1191 	 * filter the cap out.
1192 	 * For similar reasons, and for testing, allow enabling the
1193 	 * mitigation even when the MDS_NO cap is set.
1194 	 */
1195 	if (cpu_vendor_id != CPU_VENDOR_INTEL || hw_mds_disable == 0 ||
1196 	    ((cpu_ia32_arch_caps & IA32_ARCH_CAP_MDS_NO) != 0 &&
1197 	    hw_mds_disable == 3)) {
1198 		mds_handler = mds_handler_void;
1199 	} else if (((cpu_stdext_feature3 & CPUID_STDEXT3_MD_CLEAR) != 0 &&
1200 	    hw_mds_disable == 3) || hw_mds_disable == 1) {
1201 		mds_handler = mds_handler_verw;
1202 	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1203 	    (CPUID_TO_MODEL(cpu_id) == 0x2e || CPUID_TO_MODEL(cpu_id) == 0x1e ||
1204 	    CPUID_TO_MODEL(cpu_id) == 0x1f || CPUID_TO_MODEL(cpu_id) == 0x1a ||
1205 	    CPUID_TO_MODEL(cpu_id) == 0x2f || CPUID_TO_MODEL(cpu_id) == 0x25 ||
1206 	    CPUID_TO_MODEL(cpu_id) == 0x2c || CPUID_TO_MODEL(cpu_id) == 0x2d ||
1207 	    CPUID_TO_MODEL(cpu_id) == 0x2a || CPUID_TO_MODEL(cpu_id) == 0x3e ||
1208 	    CPUID_TO_MODEL(cpu_id) == 0x3a) &&
1209 	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1210 		/*
1211 		 * Nehalem, SandyBridge, IvyBridge
1212 		 */
1213 		CPU_FOREACH(i) {
1214 			pc = pcpu_find(i);
1215 			if (pc->pc_mds_buf == NULL) {
1216 				pc->pc_mds_buf = malloc_domainset(672, M_TEMP,
1217 				    DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
1218 				bzero(pc->pc_mds_buf, 16);
1219 			}
1220 		}
1221 		mds_handler = mds_handler_ivb;
1222 	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1223 	    (CPUID_TO_MODEL(cpu_id) == 0x3f || CPUID_TO_MODEL(cpu_id) == 0x3c ||
1224 	    CPUID_TO_MODEL(cpu_id) == 0x45 || CPUID_TO_MODEL(cpu_id) == 0x46 ||
1225 	    CPUID_TO_MODEL(cpu_id) == 0x56 || CPUID_TO_MODEL(cpu_id) == 0x4f ||
1226 	    CPUID_TO_MODEL(cpu_id) == 0x47 || CPUID_TO_MODEL(cpu_id) == 0x3d) &&
1227 	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1228 		/*
1229 		 * Haswell, Broadwell
1230 		 */
1231 		CPU_FOREACH(i) {
1232 			pc = pcpu_find(i);
1233 			if (pc->pc_mds_buf == NULL) {
1234 				pc->pc_mds_buf = malloc_domainset(1536, M_TEMP,
1235 				    DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
1236 				bzero(pc->pc_mds_buf, 16);
1237 			}
1238 		}
1239 		mds_handler = mds_handler_bdw;
1240 	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1241 	    ((CPUID_TO_MODEL(cpu_id) == 0x55 && (cpu_id &
1242 	    CPUID_STEPPING) <= 5) ||
1243 	    CPUID_TO_MODEL(cpu_id) == 0x4e || CPUID_TO_MODEL(cpu_id) == 0x5e ||
1244 	    (CPUID_TO_MODEL(cpu_id) == 0x8e && (cpu_id &
1245 	    CPUID_STEPPING) <= 0xb) ||
1246 	    (CPUID_TO_MODEL(cpu_id) == 0x9e && (cpu_id &
1247 	    CPUID_STEPPING) <= 0xc)) &&
1248 	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1249 		/*
1250 		 * Skylake, KabyLake, CoffeeLake, WhiskeyLake,
1251 		 * CascadeLake
1252 		 */
1253 		CPU_FOREACH(i) {
1254 			pc = pcpu_find(i);
1255 			if (pc->pc_mds_buf == NULL) {
1256 				pc->pc_mds_buf = malloc_domainset(6 * 1024,
1257 				    M_TEMP, DOMAINSET_PREF(pc->pc_domain),
1258 				    M_WAITOK);
1259 				b64 = (vm_offset_t)malloc_domainset(64 + 63,
1260 				    M_TEMP, DOMAINSET_PREF(pc->pc_domain),
1261 				    M_WAITOK);
1262 				pc->pc_mds_buf64 = (void *)roundup2(b64, 64);
1263 				bzero(pc->pc_mds_buf64, 64);
1264 			}
1265 		}
1266 		xcr0 = rxcr(0);
1267 		if ((xcr0 & XFEATURE_ENABLED_ZMM_HI256) != 0 &&
1268 		    (cpu_stdext_feature & CPUID_STDEXT_AVX512DQ) != 0)
1269 			mds_handler = mds_handler_skl_avx512;
1270 		else if ((xcr0 & XFEATURE_ENABLED_AVX) != 0 &&
1271 		    (cpu_feature2 & CPUID2_AVX) != 0)
1272 			mds_handler = mds_handler_skl_avx;
1273 		else
1274 			mds_handler = mds_handler_skl_sse;
1275 	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1276 	    ((CPUID_TO_MODEL(cpu_id) == 0x37 ||
1277 	    CPUID_TO_MODEL(cpu_id) == 0x4a ||
1278 	    CPUID_TO_MODEL(cpu_id) == 0x4c ||
1279 	    CPUID_TO_MODEL(cpu_id) == 0x4d ||
1280 	    CPUID_TO_MODEL(cpu_id) == 0x5a ||
1281 	    CPUID_TO_MODEL(cpu_id) == 0x5d ||
1282 	    CPUID_TO_MODEL(cpu_id) == 0x6e ||
1283 	    CPUID_TO_MODEL(cpu_id) == 0x65 ||
1284 	    CPUID_TO_MODEL(cpu_id) == 0x75 ||
1285 	    CPUID_TO_MODEL(cpu_id) == 0x1c ||
1286 	    CPUID_TO_MODEL(cpu_id) == 0x26 ||
1287 	    CPUID_TO_MODEL(cpu_id) == 0x27 ||
1288 	    CPUID_TO_MODEL(cpu_id) == 0x35 ||
1289 	    CPUID_TO_MODEL(cpu_id) == 0x36 ||
1290 	    CPUID_TO_MODEL(cpu_id) == 0x7a))) {
1291 		/* Silvermont, Airmont */
1292 		CPU_FOREACH(i) {
1293 			pc = pcpu_find(i);
1294 			if (pc->pc_mds_buf == NULL)
1295 				pc->pc_mds_buf = malloc(256, M_TEMP, M_WAITOK);
1296 		}
1297 		mds_handler = mds_handler_silvermont;
1298 	} else {
1299 		hw_mds_disable = 0;
1300 		mds_handler = mds_handler_void;
1301 	}
1302 }
1303 
1304 static void
1305 hw_mds_recalculate_boot(void *arg __unused)
1306 {
1307 
1308 	hw_mds_recalculate();
1309 }
1310 SYSINIT(mds_recalc, SI_SUB_SMP, SI_ORDER_ANY, hw_mds_recalculate_boot, NULL);
1311 
1312 static int
1313 sysctl_mds_disable_handler(SYSCTL_HANDLER_ARGS)
1314 {
1315 	int error, val;
1316 
1317 	val = hw_mds_disable;
1318 	error = sysctl_handle_int(oidp, &val, 0, req);
1319 	if (error != 0 || req->newptr == NULL)
1320 		return (error);
1321 	if (val < 0 || val > 3)
1322 		return (EINVAL);
1323 	hw_mds_disable = val;
1324 	hw_mds_recalculate();
1325 	return (0);
1326 }
1327 
1328 SYSCTL_PROC(_hw, OID_AUTO, mds_disable, CTLTYPE_INT |
1329     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1330     sysctl_mds_disable_handler, "I",
1331     "Microarchitectural Data Sampling Mitigation "
1332     "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");
1333 
1334 SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, disable, CTLTYPE_INT |
1335     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1336     sysctl_mds_disable_handler, "I",
1337     "Microarchitectural Data Sampling Mitigation "
1338     "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");
1339 
1340 /*
1341  * Intel Transactional Memory Asynchronous Abort Mitigation
1342  * CVE-2019-11135
1343  */
1344 int x86_taa_enable;
1345 int x86_taa_state;
1346 enum {
1347 	TAA_NONE	= 0,	/* No mitigation enabled */
1348 	TAA_TSX_DISABLE	= 1,	/* Disable TSX via MSR */
1349 	TAA_VERW	= 2,	/* Use VERW mitigation */
1350 	TAA_AUTO	= 3,	/* Automatically select the mitigation */
1351 
1352 	/* The states below are not selectable by the operator */
1353 
1354 	TAA_TAA_UC	= 4,	/* Mitigation present in microcode */
1355 	TAA_NOT_PRESENT	= 5	/* TSX is not present */
1356 };
1357 
1358 static void
1359 taa_set(bool enable, bool all)
1360 {
1361 
1362 	x86_msr_op(MSR_IA32_TSX_CTRL,
1363 	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
1364 	    (all ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
1365 	    IA32_TSX_CTRL_RTM_DISABLE | IA32_TSX_CTRL_TSX_CPUID_CLEAR,
1366 	    NULL);
1367 }
1368 
1369 void
1370 x86_taa_recalculate(void)
1371 {
1372 	static int taa_saved_mds_disable = 0;
1373 	int taa_need = 0, taa_state = 0;
1374 	int mds_disable = 0, need_mds_recalc = 0;
1375 
1376 	/* Check CPUID.07h.EBX.HLE and RTM for the presence of TSX */
1377 	if ((cpu_stdext_feature & CPUID_STDEXT_HLE) == 0 ||
1378 	    (cpu_stdext_feature & CPUID_STDEXT_RTM) == 0) {
1379 		/* TSX is not present */
1380 		x86_taa_state = TAA_NOT_PRESENT;
1381 		return;
1382 	}
1383 
1384 	/* Check to see what mitigation options the CPU gives us */
1385 	if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TAA_NO) {
1386 		/* CPU is not susceptible to TAA */
1387 		taa_need = TAA_TAA_UC;
1388 	} else if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TSX_CTRL) {
1389 		/*
1390 		 * CPU can turn off TSX.  This is the next best option
1391 		 * if TAA_NO hardware mitigation isn't present
1392 		 */
1393 		taa_need = TAA_TSX_DISABLE;
1394 	} else {
1395 		/* No TSX/TAA specific remedies are available. */
1396 		if (x86_taa_enable == TAA_TSX_DISABLE) {
1397 			if (bootverbose)
1398 				printf("TSX control not available\n");
1399 			return;
1400 		} else
1401 			taa_need = TAA_VERW;
1402 	}
1403 
1404 	/* Can we automatically take action, or are we being forced? */
1405 	if (x86_taa_enable == TAA_AUTO)
1406 		taa_state = taa_need;
1407 	else
1408 		taa_state = x86_taa_enable;
1409 
1410 	/* No state change, nothing to do */
1411 	if (taa_state == x86_taa_state) {
1412 		if (bootverbose)
1413 			printf("No TSX change made\n");
1414 		return;
1415 	}
1416 
1417 	/* Does the MSR need to be turned on or off? */
1418 	if (taa_state == TAA_TSX_DISABLE)
1419 		taa_set(true, true);
1420 	else if (x86_taa_state == TAA_TSX_DISABLE)
1421 		taa_set(false, true);
1422 
1423 	/* Does MDS need to be set to turn on VERW? */
1424 	if (taa_state == TAA_VERW) {
1425 		taa_saved_mds_disable = hw_mds_disable;
1426 		mds_disable = hw_mds_disable = 1;
1427 		need_mds_recalc = 1;
1428 	} else if (x86_taa_state == TAA_VERW) {
1429 		mds_disable = hw_mds_disable = taa_saved_mds_disable;
1430 		need_mds_recalc = 1;
1431 	}
1432 	if (need_mds_recalc) {
1433 		hw_mds_recalculate();
1434 		if (mds_disable != hw_mds_disable) {
1435 			if (bootverbose)
1436 				printf("Cannot change MDS state for TAA\n");
1437 			/* Don't update our state */
1438 			return;
1439 		}
1440 	}
1441 
1442 	x86_taa_state = taa_state;
1443 	return;
1444 }
1445 
1446 static void
1447 taa_recalculate_boot(void *arg __unused)
1448 {
1449 
1450 	x86_taa_recalculate();
1451 }
1452 SYSINIT(taa_recalc, SI_SUB_SMP, SI_ORDER_ANY, taa_recalculate_boot, NULL);
1453 
1454 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, taa,
1455     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1456     "TSX Asynchronous Abort Mitigation");
1457 
1458 static int
1459 sysctl_taa_handler(SYSCTL_HANDLER_ARGS)
1460 {
1461 	int error, val;
1462 
1463 	val = x86_taa_enable;
1464 	error = sysctl_handle_int(oidp, &val, 0, req);
1465 	if (error != 0 || req->newptr == NULL)
1466 		return (error);
1467 	if (val < TAA_NONE || val > TAA_AUTO)
1468 		return (EINVAL);
1469 	x86_taa_enable = val;
1470 	x86_taa_recalculate();
1471 	return (0);
1472 }
1473 
1474 SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, enable, CTLTYPE_INT |
1475     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1476     sysctl_taa_handler, "I",
1477     "TAA Mitigation enablement control "
1478     "(0 - off, 1 - disable TSX, 2 - VERW, 3 - on AUTO)");
1479 
1480 static int
1481 sysctl_taa_state_handler(SYSCTL_HANDLER_ARGS)
1482 {
1483 	const char *state;
1484 
1485 	switch (x86_taa_state) {
1486 	case TAA_NONE:
1487 		state = "inactive";
1488 		break;
1489 	case TAA_TSX_DISABLE:
1490 		state = "TSX disabled";
1491 		break;
1492 	case TAA_VERW:
1493 		state = "VERW";
1494 		break;
1495 	case TAA_TAA_UC:
1496 		state = "Mitigated in microcode";
1497 		break;
1498 	case TAA_NOT_PRESENT:
1499 		state = "TSX not present";
1500 		break;
1501 	default:
1502 		state = "unknown";
1503 	}
1504 
1505 	return (SYSCTL_OUT(req, state, strlen(state)));
1506 }
1507 
1508 SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
1509     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1510     sysctl_taa_state_handler, "A",
1511     "TAA Mitigation state");
1512 
1513 int __read_frequently cpu_flush_rsb_ctxsw;
1514 SYSCTL_INT(_machdep_mitigations, OID_AUTO, flush_rsb_ctxsw,
1515     CTLFLAG_RW | CTLFLAG_NOFETCH, &cpu_flush_rsb_ctxsw, 0,
1516     "Flush Return Stack Buffer on context switch");
1517 
1518 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, rngds,
1519     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1520     "MCU Optimization, disable RDSEED mitigation");
1521 
1522 int x86_rngds_mitg_enable = 1;
1523 void
1524 x86_rngds_mitg_recalculate(bool all_cpus)
1525 {
1526 	if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0)
1527 		return;
1528 	x86_msr_op(MSR_IA32_MCU_OPT_CTRL,
1529 	    (x86_rngds_mitg_enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
1530 	    (all_cpus ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
1531 	    IA32_RNGDS_MITG_DIS, NULL);
1532 }
1533 
1534 static int
1535 sysctl_rngds_mitg_enable_handler(SYSCTL_HANDLER_ARGS)
1536 {
1537 	int error, val;
1538 
1539 	val = x86_rngds_mitg_enable;
1540 	error = sysctl_handle_int(oidp, &val, 0, req);
1541 	if (error != 0 || req->newptr == NULL)
1542 		return (error);
1543 	x86_rngds_mitg_enable = val;
1544 	x86_rngds_mitg_recalculate(true);
1545 	return (0);
1546 }
1547 SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, enable, CTLTYPE_INT |
1548     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1549     sysctl_rngds_mitg_enable_handler, "I",
1550     "MCU Optimization, disabling RDSEED mitigation control "
1551     "(0 - mitigation disabled (RDSEED optimized), 1 - mitigation enabled)");
1552 
1553 static int
1554 sysctl_rngds_state_handler(SYSCTL_HANDLER_ARGS)
1555 {
1556 	const char *state;
1557 
1558 	if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0) {
1559 		state = "Not applicable";
1560 	} else if (x86_rngds_mitg_enable == 0) {
1561 		state = "RDSEED not serialized";
1562 	} else {
1563 		state = "Mitigated";
1564 	}
1565 	return (SYSCTL_OUT(req, state, strlen(state)));
1566 }
1567 SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, state,
1568     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1569     sysctl_rngds_state_handler, "A",
1570     "MCU Optimization state");
1571 
1572 
1573 /*
1574  * Zenbleed.
1575  *
1576  * No corresponding erratum is publicly listed.  AMD has issued a security
1577  * bulletin (AMD-SB-7008), entitled "Cross-Process Information Leak".  This
1578  * document lists (as of August 2023) platform firmware's availability target
1579  * dates, with most being November/December 2023.  It will then be up to
1580  * motherboard manufacturers to produce corresponding BIOS updates, which will
1581  * happen with an inevitable lag.  Additionally, for a variety of reasons,
1582  * operators might not be able to apply them everywhere.  On the side of
1583  * standalone CPU microcodes, no plans for availability have been published so
1584  * far.  However, a developer appearing to be an AMD employee has hardcoded in
1585  * Linux revision numbers of future microcodes that are presumed to fix the
1586  * vulnerability.
1587  *
1588  * Given the stability issues encountered with early microcode releases for Rome
1589  * (the only microcode publicly released so far) and the absence of official
1590  * communication on standalone CPU microcodes, we have opted instead for
1591  * matching by default all AMD Zen2 processors which, according to the
1592  * vulnerability's discoverer, are all affected (see
1593  * https://lock.cmpxchg8b.com/zenbleed.html).  This policy, also adopted by
1594  * OpenBSD, may be overridden using the tunable/sysctl
1595  * 'machdep.mitigations.zenbleed.enable'.  We might revise it later depending on
1596  * official statements, microcode updates' public availability and community
1597  * assessment that they actually fix the vulnerability without any instability
1598  * side effects.
1599  */
1600 
1601 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, zenbleed,
1602     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1603     "Zenbleed OS-triggered prevention (via chicken bit)");
1604 
1605 /* 2 is auto, see below. */
1606 int zenbleed_enable = 2;
1607 
1608 void
1609 zenbleed_sanitize_enable(void)
1610 {
1611 	/* Default to auto (2). */
1612 	if (zenbleed_enable < 0 || zenbleed_enable > 2)
1613 		zenbleed_enable = 2;
1614 }
1615 
1616 static bool
1617 zenbleed_chicken_bit_applicable(void)
1618 {
1619 	/* Concerns only bare-metal AMD Zen2 processors. */
1620 	return (cpu_vendor_id == CPU_VENDOR_AMD &&
1621 	    CPUID_TO_FAMILY(cpu_id) == 0x17 &&
1622 	    CPUID_TO_MODEL(cpu_id) >= 0x30 &&
1623 	    vm_guest == VM_GUEST_NO);
1624 }
1625 
1626 static bool
1627 zenbleed_chicken_bit_should_enable(void)
1628 {
1629 	/*
1630 	 * Obey tunable/sysctl.
1631 	 *
1632 	 * As explained above, currently, the automatic setting (2) and the "on"
1633 	 * one (1) have the same effect.  In the future, we might additionally
1634 	 * check for specific microcode revisions as part of the automatic
1635 	 * determination.
1636 	 */
1637 	return (zenbleed_enable != 0);
1638 }
1639 
1640 void
1641 zenbleed_check_and_apply(bool all_cpus)
1642 {
1643 	bool set;
1644 
1645 	if (!zenbleed_chicken_bit_applicable())
1646 		return;
1647 
1648 	set = zenbleed_chicken_bit_should_enable();
1649 
1650 	x86_msr_op(MSR_DE_CFG,
1651 	    (set ? MSR_OP_OR : MSR_OP_ANDNOT) |
1652 	    (all_cpus ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
1653 	    DE_CFG_ZEN2_FP_BACKUP_FIX_BIT, NULL);
1654 }
1655 
1656 static int
1657 sysctl_zenbleed_enable_handler(SYSCTL_HANDLER_ARGS)
1658 {
1659 	int error, val;
1660 
1661 	val = zenbleed_enable;
1662 	error = sysctl_handle_int(oidp, &val, 0, req);
1663 	if (error != 0 || req->newptr == NULL)
1664 		return (error);
1665 	zenbleed_enable = val;
1666 	zenbleed_sanitize_enable();
1667 	zenbleed_check_and_apply(true);
1668 	return (0);
1669 }
1670 SYSCTL_PROC(_machdep_mitigations_zenbleed, OID_AUTO, enable, CTLTYPE_INT |
1671     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1672     sysctl_zenbleed_enable_handler, "I",
1673     "Enable Zenbleed OS-triggered mitigation (chicken bit) "
1674     "(0: Force disable, 1: Force enable, 2: Automatic determination)");
1675 
1676 static int
1677 sysctl_zenbleed_state_handler(SYSCTL_HANDLER_ARGS)
1678 {
1679 	const char *state;
1680 
1681 	if (!zenbleed_chicken_bit_applicable())
1682 		state = "Not applicable";
1683 	else if (zenbleed_chicken_bit_should_enable())
1684 		state = "Mitigation enabled";
1685 	else
1686 		state = "Mitigation disabled";
1687 	return (SYSCTL_OUT(req, state, strlen(state)));
1688 }
1689 SYSCTL_PROC(_machdep_mitigations_zenbleed, OID_AUTO, state,
1690     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1691     sysctl_zenbleed_state_handler, "A",
1692     "Zenbleed OS-triggered mitigation (chicken bit) state");
1693 
1694 
1695 /*
1696  * Enable and restore kernel text write permissions.
1697  * Callers must ensure that disable_wp()/restore_wp() are executed
1698  * without rescheduling on the same core.
1699  */
1700 bool
1701 disable_wp(void)
1702 {
1703 	u_int cr0;
1704 
1705 	cr0 = rcr0();
1706 	if ((cr0 & CR0_WP) == 0)
1707 		return (false);
1708 	load_cr0(cr0 & ~CR0_WP);
1709 	return (true);
1710 }
1711 
1712 void
1713 restore_wp(bool old_wp)
1714 {
1715 
1716 	if (old_wp)
1717 		load_cr0(rcr0() | CR0_WP);
1718 }
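/*
 * Editorial sketch (not part of the original file): the intended usage
 * pattern.  A critical section satisfies the "no rescheduling on the
 * same core" requirement above, since CR0.WP is per-CPU state.
 */
static void
example_patch_kernel_text(uint32_t *insn, uint32_t newval)
{
	bool wp;

	critical_enter();
	wp = disable_wp();
	*insn = newval;		/* Write to otherwise read-only text. */
	restore_wp(wp);
	critical_exit();
}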
1719 
1720 bool
1721 acpi_get_fadt_bootflags(uint16_t *flagsp)
1722 {
1723 #ifdef DEV_ACPI
1724 	ACPI_TABLE_FADT *fadt;
1725 	vm_paddr_t physaddr;
1726 
1727 	physaddr = acpi_find_table(ACPI_SIG_FADT);
1728 	if (physaddr == 0)
1729 		return (false);
1730 	fadt = acpi_map_table(physaddr, ACPI_SIG_FADT);
1731 	if (fadt == NULL)
1732 		return (false);
1733 	*flagsp = fadt->BootFlags;
1734 	acpi_unmap_table(fadt);
1735 	return (true);
1736 #else
1737 	return (false);
1738 #endif
1739 }
1740 
1741 DEFINE_IFUNC(, uint64_t, rdtsc_ordered, (void))
1742 {
1743 	bool cpu_is_amd = cpu_vendor_id == CPU_VENDOR_AMD ||
1744 	    cpu_vendor_id == CPU_VENDOR_HYGON;
1745 
1746 	if ((amd_feature & AMDID_RDTSCP) != 0)
1747 		return (rdtscp);
1748 	else if ((cpu_feature & CPUID_SSE2) != 0)
1749 		return (cpu_is_amd ? rdtsc_ordered_mfence :
1750 		    rdtsc_ordered_lfence);
1751 	else
1752 		return (rdtsc);
1753 }
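/*
 * Editorial note (sketch): the ifunc above resolves once at boot to the
 * cheapest serializing TSC read available: RDTSCP when supported,
 * otherwise RDTSC behind the vendor-appropriate fence (MFENCE on
 * AMD/Hygon, LFENCE otherwise), falling back to a plain RDTSC on
 * pre-SSE2 CPUs that lack the fence instructions.
 */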
1754