/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2014 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2024 Oxide Computer Company
 */

#include <sys/types.h>
#include <sys/stdbool.h>
#include <sys/errno.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <machine/vmm.h>
#include <sys/vmm_kernel.h>

#include "vmm_host.h"
#include "vmm_util.h"

/*
 * CPUID Emulation
 *
 * All CPUID instruction exits are handled by the in-kernel emulation.
 *
 * ----------------
 * Legacy Emulation
 * ----------------
 *
 * Originally, the kernel vmm portion of bhyve relied on fixed logic to filter
 * and/or generate CPUID results based on what was reported by the host CPU, as
 * well as attributes of the VM (such as CPU topology, and enabled features).
 * This is largely adequate to expose CPU capabilities to the guest in a manner
 * which allows it to operate properly.
 *
 * ------------------------------
 * Userspace-Controlled Emulation
 * ------------------------------
 *
 * In certain situations, more control over the CPUID emulation results
 * presented to the guest is desired. Live migration between physical hosts is
 * one such example, where the underlying CPUs, or at least their microcode,
 * may differ between the source and destination. In such cases, where changes
 * to the CPUID results cannot be tolerated, the userspace portion of the VMM
 * can take complete control over the leaves which are presented to the guest.
 * It may still consult the "legacy" CPUID data for guidance about which CPU
 * features are safe to expose (due to hypervisor limitations, etc). This leaf
 * information is configured on a per-vCPU basis.
 *
 * The emulation entries provided by userspace are expected to be in sorted
 * order, running from lowest function and index to highest.
 *
 * For example:
 * (func: 00h idx: 00h) ->
 *     (flags: 0, eax: highest std leaf, ebx-edx: vendor id)
 * (func: 0Dh idx: 00h) ->
 *     (flags: VCE_FLAG_MATCH_INDEX, eax - edx: XCR0/XSAVE info)
 * (func: 0Dh idx: 01h) ->
 *     (flags: VCE_FLAG_MATCH_INDEX, eax - edx: XSAVE/XSAVEOPT details)
 * ...
 * (func: 0Dh idx: 07h) ->
 *     (flags: VCE_FLAG_MATCH_INDEX, eax - edx: AVX-512 details)
 * (func: 80000000h idx: 0h) ->
 *     (flags: 0, eax: highest extd leaf ...)
 * ...
 */
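
/*
 * As an illustration only (not drawn from any particular consumer), a table
 * matching the example above could be populated like so, leaving the
 * vce_eax..vce_edx output values to be filled from whatever (suitably
 * filtered) CPUID data is to be presented:
 *
 *	struct vcpu_cpuid_entry ents[4] = { 0 };
 *
 *	ents[0].vce_function = 0x0;
 *	ents[1].vce_function = 0xd;
 *	ents[1].vce_index = 0x0;
 *	ents[1].vce_flags = VCE_FLAG_MATCH_INDEX;
 *	ents[2].vce_function = 0xd;
 *	ents[2].vce_index = 0x1;
 *	ents[2].vce_flags = VCE_FLAG_MATCH_INDEX;
 *	ents[3].vce_function = 0x80000000;
 *
 * Entries sharing a function must also be sorted by index, since the lookup
 * in cpuid_find_entry() returns the first acceptable match.
 */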


#define	CPUID_TYPE_MASK		0xf0000000
#define	CPUID_TYPE_STD		0x00000000
#define	CPUID_TYPE_EXTD		0x80000000

static const struct vcpu_cpuid_entry cpuid_empty_entry = { 0 };

/*
 * Given the CPUID configuration for a vCPU, locate the entry which matches the
 * provided function/index tuple. The entries list is walked in order, and the
 * first valid match based on the function/index and flags will be emitted.
 *
 * If no match is found, but Intel-style fallback is configured, then the
 * highest standard leaf encountered will be emitted.
 */
static const struct vcpu_cpuid_entry *
cpuid_find_entry(const vcpu_cpuid_config_t *cfg, uint32_t func, uint32_t idx)
{
	const struct vcpu_cpuid_entry *last_std = NULL;
	const bool intel_fallback =
	    (cfg->vcc_flags & VCC_FLAG_INTEL_FALLBACK) != 0;
	bool matched_leaf = false;

	ASSERT0(cfg->vcc_flags & VCC_FLAG_LEGACY_HANDLING);

	for (uint_t i = 0; i < cfg->vcc_nent; i++) {
		const struct vcpu_cpuid_entry *ent = &cfg->vcc_entries[i];
		const bool ent_is_std =
		    (ent->vce_function & CPUID_TYPE_MASK) == CPUID_TYPE_STD;
		const bool ent_must_match_idx =
		    (ent->vce_flags & VCE_FLAG_MATCH_INDEX) != 0;

		if (ent_is_std) {
			/*
			 * Keep track of the last "standard" leaf for
			 * Intel-style fallback behavior.
			 *
			 * This does not currently account for the sub-leaf
			 * index matching behavior for fallback described in
			 * the SDM. It is not clear if any consumers rely on
			 * such matching when encountering fallback.
			 */
			last_std = ent;
		}
		if (ent->vce_function == func) {
			if (ent->vce_index == idx || !ent_must_match_idx) {
				return (ent);
			}
			/*
			 * Make note of when the top-level leaf matches, even
			 * when the index does not.
			 */
			matched_leaf = true;
		} else if (ent->vce_function > func) {
			if ((ent->vce_function & CPUID_TYPE_MASK) ==
			    (func & CPUID_TYPE_MASK)) {
				/*
				 * We are beyond a valid leaf to match, but have
				 * not exceeded the maximum leaf for this "type"
				 * (standard, extended, hvm, etc), so return an
				 * empty entry.
				 */
				return (&cpuid_empty_entry);
			} else {
				/*
				 * Otherwise, we can stop now, having gone
				 * beyond the last entry which could match the
				 * target function in a sorted list.
				 */
				break;
			}
		}
	}

	if (matched_leaf || !intel_fallback) {
		return (&cpuid_empty_entry);
	} else {
		return (last_std);
	}
}

void
vcpu_emulate_cpuid(struct vm *vm, int vcpuid, uint64_t *rax, uint64_t *rbx,
    uint64_t *rcx, uint64_t *rdx)
{
	const vcpu_cpuid_config_t *cfg = vm_cpuid_config(vm, vcpuid);

	ASSERT3P(rax, !=, NULL);
	ASSERT3P(rbx, !=, NULL);
	ASSERT3P(rcx, !=, NULL);
	ASSERT3P(rdx, !=, NULL);

	/* Fall back to legacy handling if specified */
	if ((cfg->vcc_flags & VCC_FLAG_LEGACY_HANDLING) != 0) {
		uint32_t regs[4] = { *rax, 0, *rcx, 0 };

		legacy_emulate_cpuid(vm, vcpuid, &regs[0], &regs[1], &regs[2],
		    &regs[3]);
		/* CPUID clears the upper 32-bits of the long-mode registers. */
		*rax = regs[0];
		*rbx = regs[1];
		*rcx = regs[2];
		*rdx = regs[3];
		return;
	}

	const struct vcpu_cpuid_entry *ent = cpuid_find_entry(cfg, *rax, *rcx);
	ASSERT(ent != NULL);
	/* CPUID clears the upper 32-bits of the long-mode registers. */
	*rax = ent->vce_eax;
	*rbx = ent->vce_ebx;
	*rcx = ent->vce_ecx;
	*rdx = ent->vce_edx;
}

/*
 * Get the current CPUID emulation configuration for this vCPU.
 *
 * Only the existing flags will be emitted if the vCPU is configured for legacy
 * operation via the VCC_FLAG_LEGACY_HANDLING flag. If in userspace-controlled
 * mode, then we will attempt to copy the existing entries into vcc_entries,
 * its size specified by vcc_nent.
 *
 * Regardless of whether vcc_entries is adequately sized (or even present),
 * vcc_nent will be set to the number of existing entries.
 */
int
vm_get_cpuid(struct vm *vm, int vcpuid, vcpu_cpuid_config_t *res)
{
	if (vcpuid < 0 || vcpuid > VM_MAXCPU) {
		return (EINVAL);
	}

	const vcpu_cpuid_config_t *src = vm_cpuid_config(vm, vcpuid);
	if (src->vcc_nent > res->vcc_nent) {
		res->vcc_nent = src->vcc_nent;
		return (E2BIG);
	} else if (src->vcc_nent != 0) {
		bcopy(src->vcc_entries, res->vcc_entries,
		    src->vcc_nent * sizeof (struct vcpu_cpuid_entry));
	}
	res->vcc_flags = src->vcc_flags;
	res->vcc_nent = src->vcc_nent;
	return (0);
}

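/*
 * The sizing contract above lends itself to a two-pass pattern on the part of
 * a caller which wants the full entry list. A minimal sketch (error handling
 * and cleanup of the allocation omitted):
 *
 *	vcpu_cpuid_config_t cfg = { 0 };
 *
 *	if (vm_get_cpuid(vm, vcpuid, &cfg) == E2BIG) {
 *		cfg.vcc_entries = kmem_alloc(cfg.vcc_nent *
 *		    sizeof (struct vcpu_cpuid_entry), KM_SLEEP);
 *		(void) vm_get_cpuid(vm, vcpuid, &cfg);
 *	}
 */
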
/*
 * Set the CPUID emulation configuration for this vCPU.
 *
 * If VCC_FLAG_LEGACY_HANDLING is set in vcc_flags, then vcc_nent is expected
 * to be set to 0, as configuring a list of entries would be useless when using
 * legacy handling.
 *
 * Any existing entries which are configured are freed, and the newly provided
 * ones will be copied into their place.
 */
int
vm_set_cpuid(struct vm *vm, int vcpuid, const vcpu_cpuid_config_t *src)
{
	if (vcpuid < 0 || vcpuid > VM_MAXCPU) {
		return (EINVAL);
	}
	if (src->vcc_nent > VMM_MAX_CPUID_ENTRIES) {
		return (EINVAL);
	}
	if ((src->vcc_flags & ~VCC_FLAGS_VALID) != 0) {
		return (EINVAL);
	}
	if ((src->vcc_flags & VCC_FLAG_LEGACY_HANDLING) != 0 &&
	    src->vcc_nent != 0) {
		/* No entries should be provided if using legacy handling */
		return (EINVAL);
	}
	for (uint_t i = 0; i < src->vcc_nent; i++) {
		/* Ensure all entries carry valid flags */
		if ((src->vcc_entries[i].vce_flags & ~VCE_FLAGS_VALID) != 0) {
			return (EINVAL);
		}
	}

	vcpu_cpuid_config_t *cfg = vm_cpuid_config(vm, vcpuid);

	/* Free any existing entries first */
	vcpu_cpuid_cleanup(cfg);

	/* Copy supplied entries into freshly allocated space */
	if (src->vcc_nent != 0) {
		const size_t entries_sz =
		    src->vcc_nent * sizeof (struct vcpu_cpuid_entry);

		cfg->vcc_nent = src->vcc_nent;
		cfg->vcc_entries = kmem_alloc(entries_sz, KM_SLEEP);
		bcopy(src->vcc_entries, cfg->vcc_entries, entries_sz);
	}
	cfg->vcc_flags = src->vcc_flags;

	return (0);
}

void
vcpu_cpuid_init(vcpu_cpuid_config_t *cfg)
{
	/* Default to legacy-style handling */
	cfg->vcc_flags = VCC_FLAG_LEGACY_HANDLING;
	cfg->vcc_nent = 0;
	cfg->vcc_entries = NULL;
}

void
vcpu_cpuid_cleanup(vcpu_cpuid_config_t *cfg)
{
	if (cfg->vcc_nent != 0) {
		ASSERT3P(cfg->vcc_entries, !=, NULL);

		kmem_free(cfg->vcc_entries,
		    cfg->vcc_nent * sizeof (struct vcpu_cpuid_entry));

		cfg->vcc_nent = 0;
		cfg->vcc_entries = NULL;
	}
}

static const char bhyve_id[12] = "bhyve bhyve ";

/*
 * Force exposure of the invariant TSC capability, regardless of whether the
 * host CPU reports having it.
 */
static int vmm_force_invariant_tsc = 0;

#define	CPUID_0000_0000		(0x0)
#define	CPUID_0000_0001		(0x1)
#define	CPUID_0000_0002		(0x2)
#define	CPUID_0000_0003		(0x3)
#define	CPUID_0000_0004		(0x4)
#define	CPUID_0000_0006		(0x6)
#define	CPUID_0000_0007		(0x7)
#define	CPUID_0000_000A		(0xA)
#define	CPUID_0000_000B		(0xB)
#define	CPUID_0000_000D		(0xD)
#define	CPUID_0000_000F		(0xF)
#define	CPUID_0000_0010		(0x10)
#define	CPUID_0000_0015		(0x15)
#define	CPUID_8000_0000		(0x80000000)
#define	CPUID_8000_0001		(0x80000001)
#define	CPUID_8000_0002		(0x80000002)
#define	CPUID_8000_0003		(0x80000003)
#define	CPUID_8000_0004		(0x80000004)
#define	CPUID_8000_0006		(0x80000006)
#define	CPUID_8000_0007		(0x80000007)
#define	CPUID_8000_0008		(0x80000008)
#define	CPUID_8000_001D		(0x8000001D)
#define	CPUID_8000_001E		(0x8000001E)

#define	CPUID_VM_HIGH		0x40000000

/*
 * CPUID instruction Fn0000_0001:
 */
#define	CPUID_0000_0001_APICID_SHIFT	24


/*
 * Compute ceil(log2(x)). Returns -1 if x is zero.
 */
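/*
 * For example: log2(1) = 0, log2(4) = 2, and log2(5) = 3, reflecting the
 * ceiling behavior which the topology calculations below rely upon.
 */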
static __inline int
log2(uint_t x)
{
	return (x == 0 ? -1 : fls(x - 1));
}

/*
 * The "legacy" bhyve cpuid emulation, which largely applies statically defined
 * masks to the data provided by the host CPU.
 */
void
legacy_emulate_cpuid(struct vm *vm, int vcpu_id, uint32_t *eax, uint32_t *ebx,
    uint32_t *ecx, uint32_t *edx)
{
	const struct xsave_limits *limits;
	uint64_t cr4;
	int error, enable_invpcid, level, width = 0, x2apic_id = 0;
	unsigned int func, regs[4], logical_cpus = 0, param;
	enum x2apic_state x2apic_state;
	uint16_t cores, maxcpus, sockets, threads;

	/*
	 * The function of CPUID is controlled through the provided value of
	 * %eax (and secondarily %ecx, for certain leaf data).
	 */
	func = (uint32_t)*eax;
	param = (uint32_t)*ecx;

	/*
	 * Requests for invalid CPUID levels should map to the highest
	 * available level instead.
	 */
	if (cpu_exthigh != 0 && func >= 0x80000000) {
		if (func > cpu_exthigh)
			func = cpu_exthigh;
	} else if (func >= 0x40000000) {
		if (func > CPUID_VM_HIGH)
			func = CPUID_VM_HIGH;
	} else if (func > cpu_high) {
		func = cpu_high;
	}

	/*
	 * In general the approach used for CPU topology is to
	 * advertise a flat topology where all CPUs are packages with
	 * no multi-core or SMT.
	 */
	switch (func) {
	/*
	 * Pass these through to the guest
	 */
	case CPUID_0000_0000:
	case CPUID_0000_0002:
	case CPUID_0000_0003:
	case CPUID_8000_0000:
	case CPUID_8000_0002:
	case CPUID_8000_0003:
	case CPUID_8000_0004:
	case CPUID_8000_0006:
		cpuid_count(func, param, regs);
		break;
	case CPUID_8000_0008:
		cpuid_count(func, param, regs);
		if (vmm_is_svm()) {
			/*
			 * As on Intel (0000_0007:0, EDX), mask out
			 * unsupported or unsafe AMD extended features
			 * (8000_0008 EBX).
			 */
			regs[1] &= (AMDFEID_CLZERO | AMDFEID_IRPERF |
			    AMDFEID_XSAVEERPTR);

			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			/*
			 * Here, width is ApicIdCoreIdSize, present on
			 * at least Family 15h and newer. It
			 * represents the "number of bits in the
			 * initial apicid that indicate thread id
			 * within a package."
			 *
			 * Our topo_probe_amd() uses it for
			 * pkg_id_shift and other OSes may rely on it.
			 */
			width = MIN(0xF, log2(threads * cores));
			if (width < 0x4)
				width = 0;
			logical_cpus = MIN(0xFF, threads * cores - 1);
			regs[2] = (width << AMDID_COREID_SIZE_SHIFT) |
			    logical_cpus;
		}
		break;

	case CPUID_8000_0001:
		cpuid_count(func, param, regs);

		/*
		 * Hide SVM from guest.
		 */
		regs[2] &= ~AMDID2_SVM;

		/*
		 * Don't advertise extended performance counter MSRs
		 * to the guest.
		 */
		regs[2] &= ~AMDID2_PCXC;
		regs[2] &= ~AMDID2_PNXC;
		regs[2] &= ~AMDID2_PTSCEL2I;

		/*
		 * Don't advertise Instruction Based Sampling feature.
		 */
		regs[2] &= ~AMDID2_IBS;

		/* NodeID MSR not available */
		regs[2] &= ~AMDID2_NODE_ID;

		/* Don't advertise the OS visible workaround feature */
		regs[2] &= ~AMDID2_OSVW;

		/* Hide mwaitx/monitorx capability from the guest */
		regs[2] &= ~AMDID2_MWAITX;

#ifndef __FreeBSD__
		/*
		 * Detection routines for TCE and FFXSR are missing
		 * from our vm_cpuid_capability() detection logic
		 * today. Mask them out until that is remedied.
		 * They do not appear to be in common usage, so their
		 * absence should not cause undue trouble.
		 */
		regs[2] &= ~AMDID2_TCE;
		regs[3] &= ~AMDID_FFXSR;
#endif

		/*
		 * Hide rdtscp/ia32_tsc_aux until we know how
		 * to deal with them.
		 */
		regs[3] &= ~AMDID_RDTSCP;
		break;

	case CPUID_8000_0007:
		cpuid_count(func, param, regs);
		/*
		 * AMD uses this leaf to advertise the processor's
		 * power monitoring and RAS capabilities. These
		 * features are hardware-specific and exposing
		 * them to a guest doesn't make a lot of sense.
		 *
		 * Intel uses this leaf only to advertise the
		 * "Invariant TSC" feature with all other bits
		 * being reserved (set to zero).
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;

		/*
		 * If the host system possesses an invariant TSC, then
		 * it is safe to expose to the guest.
		 *
		 * If there is measured skew between host TSCs, it will
		 * be properly offset so guests do not observe any
		 * change between CPU migrations.
		 */
		regs[3] &= AMDPM_TSC_INVARIANT;

		/*
		 * Since illumos avoids deep C-states on CPUs which do
		 * not support an invariant TSC, it may be safe (and
		 * desired) to unconditionally expose that capability to
		 * the guest.
		 */
		if (vmm_force_invariant_tsc != 0) {
			regs[3] |= AMDPM_TSC_INVARIANT;
		}
		break;

	case CPUID_8000_001D:
		/* AMD Cache topology, like 0000_0004 for Intel. */
		if (!vmm_is_svm())
			goto default_leaf;

		/*
		 * Similar to Intel, generate a fictitious cache
		 * topology for the guest with L3 shared by the
		 * package, and L1 and L2 local to a core.
		 */
		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		switch (param) {
		case 0:
			logical_cpus = threads;
			level = 1;
			func = 1;	/* data cache */
			break;
		case 1:
			logical_cpus = threads;
			level = 2;
			func = 3;	/* unified cache */
			break;
		case 2:
			logical_cpus = threads * cores;
			level = 3;
			func = 3;	/* unified cache */
			break;
		default:
			logical_cpus = 0;
			level = 0;
			func = 0;
			break;
		}

		if (level == 0) {
			regs[0] = 0;
			regs[1] = 0;
		} else {
			logical_cpus = MIN(0xfff, logical_cpus - 1);
			regs[0] = (logical_cpus << 14) | (1 << 8) |
			    (level << 5) | func;
			regs[1] = func > 0 ? _CACHE_LINE_SIZE - 1 : 0;
		}
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_8000_001E:
		/*
		 * AMD Family 16h+ and Hygon Family 18h additional
		 * identifiers.
		 */
		if (!vmm_is_svm() || CPUID_TO_FAMILY(cpu_id) < 0x16)
			goto default_leaf;

		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		regs[0] = vcpu_id;
		threads = MIN(0xFF, threads - 1);
		regs[1] = (threads << 8) |
		    (vcpu_id >> log2(threads + 1));
		/*
		 * XXX Bhyve topology cannot yet represent >1 node per
		 * processor.
		 */
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_0001:
		do_cpuid(1, regs);

		error = vm_get_x2apic_state(vm, vcpu_id, &x2apic_state);
		VERIFY0(error);

		/*
		 * Override the APIC ID only in ebx
		 */
		regs[1] &= ~(CPUID_LOCAL_APIC_ID);
		regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);

		/*
		 * Don't expose VMX, SpeedStep, TM2 or SMX capability.
		 * Advertise x2APIC capability and Hypervisor guest.
		 */
		regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
		regs[2] &= ~(CPUID2_SMX);

		regs[2] |= CPUID2_HV;

		if (x2apic_state != X2APIC_DISABLED)
			regs[2] |= CPUID2_X2APIC;
		else
			regs[2] &= ~CPUID2_X2APIC;

		/*
		 * Only advertise CPUID2_XSAVE in the guest if
		 * the host is using XSAVE.
		 */
		if (!(regs[2] & CPUID2_OSXSAVE))
			regs[2] &= ~CPUID2_XSAVE;

		/*
		 * If CPUID2_XSAVE is being advertised and the
		 * guest has set CR4_XSAVE, set
		 * CPUID2_OSXSAVE.
		 */
		regs[2] &= ~CPUID2_OSXSAVE;
		if (regs[2] & CPUID2_XSAVE) {
			error = vm_get_register(vm, vcpu_id,
			    VM_REG_GUEST_CR4, &cr4);
			VERIFY0(error);
			if (cr4 & CR4_XSAVE)
				regs[2] |= CPUID2_OSXSAVE;
		}

		/*
		 * Hide monitor/mwait until we know how to deal with
		 * these instructions.
		 */
		regs[2] &= ~CPUID2_MON;

		/*
		 * Hide the performance and debug features.
		 */
		regs[2] &= ~CPUID2_PDCM;

		/*
		 * No TSC deadline support in the APIC yet
		 */
		regs[2] &= ~CPUID2_TSCDLT;

		/*
		 * Hide thermal monitoring
		 */
		regs[3] &= ~(CPUID_ACPI | CPUID_TM);

		/*
		 * Hide the debug store capability.
		 */
		regs[3] &= ~CPUID_DS;

		/*
		 * Advertise the Machine Check and MTRR capability.
		 *
		 * Some guest OSes (e.g. Windows) will not boot if
		 * these features are absent.
		 */
		regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);

		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		logical_cpus = threads * cores;
		regs[1] &= ~CPUID_HTT_CORES;
		regs[1] |= (logical_cpus & 0xff) << 16;
		regs[3] |= CPUID_HTT;
		break;

	case CPUID_0000_0004:
		cpuid_count(func, param, regs);

		if (regs[0] || regs[1] || regs[2] || regs[3]) {
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			regs[0] &= 0x3ff;
			regs[0] |= (cores - 1) << 26;
			/*
			 * Cache topology:
			 * - L1 and L2 are shared only by the logical
			 *   processors in a single core.
			 * - L3 and above are shared by all logical
			 *   processors in the package.
			 */
			logical_cpus = threads;
			level = (regs[0] >> 5) & 0x7;
			if (level >= 3)
				logical_cpus *= cores;
			regs[0] |= (logical_cpus - 1) << 14;
		}
		break;

	case CPUID_0000_0007:
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/* leaf 0 */
		if (param == 0) {
			cpuid_count(func, param, regs);

			/* Only leaf 0 is supported */
			regs[0] = 0;

			/*
			 * Expose known-safe features.
			 */
			regs[1] &= CPUID_STDEXT_FSGSBASE |
			    CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
			    CPUID_STDEXT_AVX2 | CPUID_STDEXT_SMEP |
			    CPUID_STDEXT_BMI2 |
			    CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
			    CPUID_STDEXT_AVX512F |
			    CPUID_STDEXT_AVX512DQ |
			    CPUID_STDEXT_RDSEED |
			    CPUID_STDEXT_SMAP |
			    CPUID_STDEXT_AVX512PF |
			    CPUID_STDEXT_AVX512ER |
			    CPUID_STDEXT_AVX512CD | CPUID_STDEXT_SHA |
			    CPUID_STDEXT_AVX512BW |
			    CPUID_STDEXT_AVX512VL;
			regs[2] &= CPUID_STDEXT2_VAES |
			    CPUID_STDEXT2_VPCLMULQDQ;
			regs[3] &= CPUID_STDEXT3_MD_CLEAR;

			/* Advertise INVPCID if it is enabled. */
			error = vm_get_capability(vm, vcpu_id,
			    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
			if (error == 0 && enable_invpcid)
				regs[1] |= CPUID_STDEXT_INVPCID;
		}
		break;

	case CPUID_0000_0006:
		regs[0] = CPUTPM1_ARAT;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000A:
		/*
		 * Handle the access, but report 0 for
		 * all options
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000B:
		/*
		 * Intel processor topology enumeration
		 */
		if (vmm_is_intel()) {
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			if (param == 0) {
				logical_cpus = threads;
				width = log2(logical_cpus);
				level = CPUID_TYPE_SMT;
				x2apic_id = vcpu_id;
			}

			if (param == 1) {
				logical_cpus = threads * cores;
				width = log2(logical_cpus);
				level = CPUID_TYPE_CORE;
				x2apic_id = vcpu_id;
			}

			if (param >= 2) {
				width = 0;
				logical_cpus = 0;
				level = 0;
				x2apic_id = 0;
			}

			regs[0] = width & 0x1f;
			regs[1] = logical_cpus & 0xffff;
			regs[2] = (level << 8) | (param & 0xff);
			regs[3] = x2apic_id;
		} else {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
		}
		break;

	case CPUID_0000_000D:
		limits = vmm_get_xsave_limits();
		if (!limits->xsave_enabled) {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		}

		cpuid_count(func, param, regs);
		switch (param) {
		case 0:
			/*
			 * Only permit the guest to use bits
			 * that are active in the host in
			 * %xcr0. Also, claim that the
			 * maximum save area size is
			 * equivalent to the host's current
			 * save area size. Since this runs
			 * "inside" of vmrun(), it runs with
			 * the guest's xcr0, so the current
			 * save area size is correct as-is.
			 */
			regs[0] &= limits->xcr0_allowed;
			regs[2] = limits->xsave_max_size;
			regs[3] &= (limits->xcr0_allowed >> 32);
			break;
		case 1:
			/* Only permit XSAVEOPT. */
			regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		default:
			/*
			 * If the leaf is for a permitted feature,
			 * pass through as-is, otherwise return
			 * all zeroes.
			 */
			if (!(limits->xcr0_allowed & (1ul << param))) {
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
			}
			break;
		}
		break;

	case CPUID_0000_000F:
	case CPUID_0000_0010:
		/*
		 * Do not report any Resource Director Technology
		 * capabilities. Exposing control of cache or memory
		 * controller resource partitioning to the guest is not
		 * at all sensible.
		 *
		 * This is already hidden at a high level by masking of
		 * leaf 0x7. Even still, a guest may look here for
		 * detailed capability information.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_0015:
		/*
		 * Don't report CPU TSC/Crystal ratio and clock
		 * values since guests may use these to derive the
		 * local APIC frequency.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case 0x40000000:
		regs[0] = CPUID_VM_HIGH;
		bcopy(bhyve_id, &regs[1], 4);
		bcopy(bhyve_id + 4, &regs[2], 4);
		bcopy(bhyve_id + 8, &regs[3], 4);
		break;

	default:
	default_leaf:
		/*
		 * The leaf value has already been clamped so
		 * simply pass this through.
		 */
		cpuid_count(func, param, regs);
		break;
	}

	*eax = regs[0];
	*ebx = regs[1];
	*ecx = regs[2];
	*edx = regs[3];
}