/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2014 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2025 Oxide Computer Company
 */

#include <sys/types.h>
#include <sys/stdbool.h>
#include <sys/errno.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <machine/vmm.h>
#include <sys/vmm_kernel.h>

#include "vmm_host.h"
#include "vmm_util.h"
#include "vlapic.h"
/*
 * CPUID Emulation
 *
 * All CPUID instruction exits are handled by the in-kernel emulation.
 *
 * ----------------
 * Legacy Emulation
 * ----------------
 *
 * Originally, the kernel vmm portion of bhyve relied on fixed logic to filter
 * and/or generate CPUID results based on what was reported by the host CPU, as
 * well as attributes of the VM (such as CPU topology and enabled features).
 * This is largely adequate to expose CPU capabilities to the guest in a manner
 * which allows it to operate properly.
 *
 * ------------------------------
 * Userspace-Controlled Emulation
 * ------------------------------
 *
 * In certain situations, more control over the CPUID emulation results
 * presented to the guest is desired.  Live migration between physical hosts is
 * one such example, where the underlying CPUs, or at least their microcode,
 * may differ between the source and destination.  In such cases, where changes
 * to the CPUID results cannot be tolerated, the userspace portion of the VMM
 * can be in complete control over the leaves which are presented to the guest.
 * It may still consult the "legacy" CPUID data for guidance about which CPU
 * features are safe to expose (due to hypervisor limitations, etc).  This leaf
 * information is configured on a per-vCPU basis.
 *
 * The emulation entries provided by userspace are expected to be in sorted
 * order, running from lowest function and index to highest.
 *
 * For example:
 * (func: 00h idx: 00h) ->
 *     (flags: 0, eax: highest std leaf, ebx-edx: vendor id)
 * (func: 0Dh idx: 00h) ->
 *     (flags: VCE_FLAG_MATCH_INDEX, eax - edx: XCR0/XSAVE info)
 * (func: 0Dh idx: 01h) ->
 *     (flags: VCE_FLAG_MATCH_INDEX, eax - edx: XSAVE/XSAVEOPT details)
 * ...
 * (func: 0Dh idx: 07h) ->
 *     (flags: VCE_FLAG_MATCH_INDEX, eax - edx: AVX-512 details)
 * (func: 80000000h idx: 00h) ->
 *     (flags: 0, eax: highest extd leaf ...)
 * ...
 */
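
/*
 * As a non-authoritative sketch (the entry values are placeholders, and the
 * ioctl plumbing which carries this from userspace down to vm_set_cpuid() is
 * elided), a consumer might assemble such a configuration along these lines:
 *
 *	struct vcpu_cpuid_entry ents[] = {
 *		{ .vce_function = 0x0, .vce_eax = 0xd },
 *		{ .vce_function = 0xd, .vce_index = 0x0,
 *		    .vce_flags = VCE_FLAG_MATCH_INDEX },
 *		{ .vce_function = 0xd, .vce_index = 0x1,
 *		    .vce_flags = VCE_FLAG_MATCH_INDEX },
 *		{ .vce_function = 0x80000000, .vce_eax = 0x80000008 },
 *	};
 *	vcpu_cpuid_config_t cfg = {
 *		.vcc_flags = VCC_FLAG_INTEL_FALLBACK,
 *		.vcc_nent = 4,
 *		.vcc_entries = ents,
 *	};
 */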

#define	CPUID_TYPE_MASK		0xf0000000
#define	CPUID_TYPE_STD		0x00000000
#define	CPUID_TYPE_EXTD		0x80000000

#define	CPUID_0000_0000		(0x0)
#define	CPUID_0000_0001		(0x1)
#define	CPUID_0000_0002		(0x2)
#define	CPUID_0000_0003		(0x3)
#define	CPUID_0000_0004		(0x4)
#define	CPUID_0000_0006		(0x6)
#define	CPUID_0000_0007		(0x7)
#define	CPUID_0000_000A		(0xA)
#define	CPUID_0000_000B		(0xB)
#define	CPUID_0000_000D		(0xD)
#define	CPUID_0000_000F		(0xF)
#define	CPUID_0000_0010		(0x10)
#define	CPUID_0000_0015		(0x15)
#define	CPUID_8000_0000		(0x80000000)
#define	CPUID_8000_0001		(0x80000001)
#define	CPUID_8000_0002		(0x80000002)
#define	CPUID_8000_0003		(0x80000003)
#define	CPUID_8000_0004		(0x80000004)
#define	CPUID_8000_0006		(0x80000006)
#define	CPUID_8000_0007		(0x80000007)
#define	CPUID_8000_0008		(0x80000008)
#define	CPUID_8000_001D		(0x8000001D)
#define	CPUID_8000_001E		(0x8000001E)

#define	CPUID_VM_HIGH		0x40000000

static const struct vcpu_cpuid_entry cpuid_empty_entry = { 0 };

/*
 * Given the CPUID configuration for a vCPU, locate the entry which matches the
 * provided function/index tuple.  The entries list is walked in order, and the
 * first valid match based on the function/index and flags will be emitted.
 *
 * If no match is found, but Intel-style fallback is configured, then the
 * highest standard leaf encountered will be emitted.
 */
static const struct vcpu_cpuid_entry *
cpuid_find_entry(const vcpu_cpuid_config_t *cfg, uint32_t func, uint32_t idx)
{
	const struct vcpu_cpuid_entry *last_std = NULL;
	const bool intel_fallback =
	    (cfg->vcc_flags & VCC_FLAG_INTEL_FALLBACK) != 0;
	bool matched_leaf = false;

	ASSERT0(cfg->vcc_flags & VCC_FLAG_LEGACY_HANDLING);

	for (uint_t i = 0; i < cfg->vcc_nent; i++) {
		const struct vcpu_cpuid_entry *ent = &cfg->vcc_entries[i];
		const bool ent_is_std =
		    (ent->vce_function & CPUID_TYPE_MASK) == CPUID_TYPE_STD;
		const bool ent_must_match_idx =
		    (ent->vce_flags & VCE_FLAG_MATCH_INDEX) != 0;

		if (ent_is_std) {
			/*
			 * Keep track of the last "standard" leaf for
			 * Intel-style fallback behavior.
			 *
			 * This does not currently account for the sub-leaf
			 * index matching behavior for fallback described in
			 * the SDM.  It is not clear if any consumers rely on
			 * such matching when encountering fallback.
			 */
			last_std = ent;
		}
		if (ent->vce_function == func) {
			if (ent->vce_index == idx || !ent_must_match_idx) {
				return (ent);
			}
			/*
			 * Make note of when the top-level leaf matches, even
			 * when the index does not.
			 */
			matched_leaf = true;
		} else if (ent->vce_function > func) {
			if ((ent->vce_function & CPUID_TYPE_MASK) ==
			    (func & CPUID_TYPE_MASK)) {
				/*
				 * We are beyond a valid leaf to match, but have
				 * not exceeded the maximum leaf for this "type"
				 * (standard, extended, hvm, etc), so return an
				 * empty entry.
				 */
				return (&cpuid_empty_entry);
			} else {
				/*
				 * Otherwise, we can stop now, having gone
				 * beyond the last entry which could match the
				 * target function in a sorted list.
				 */
				break;
			}
		}
	}

	if (matched_leaf || !intel_fallback) {
		return (&cpuid_empty_entry);
	} else {
		return (last_std);
	}
}
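
/*
 * To illustrate, taking the example entry list from the comment at the top of
 * this file as complete:
 *
 * - A request for (func 0Dh, idx 01h) matches its entry exactly and that
 *   entry is returned.
 * - A request for (func 09h, idx 00h) has no entry, but falls below the
 *   highest standard leaf, so the empty (all-zero) entry is returned.
 * - A request for (func 0Dh, idx 40h) matches the leaf but no sub-leaf, so
 *   the empty entry is returned regardless of the fallback setting.
 * - A request for (func 16h, idx 00h), above every standard entry in the
 *   list, yields the highest standard entry if VCC_FLAG_INTEL_FALLBACK is
 *   set, and the empty entry otherwise.
 */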

/*
 * Updates a previously-populated set of CPUID return values to account for the
 * runtime state of the executing vCPU, i.e., the values in its control
 * registers and MSRs that influence the values returned by the CPUID
 * instruction.
 *
 * This function does not account for "static" properties of the vCPU or VM,
 * such as the enablement of VM-wide features and capabilities (like x2APIC or
 * INVPCID support) or settings that vary only with the vCPU's ID (like the
 * values returned from its topology leaves).
 *
 * This function assumes that it is called from within VMRUN(), which
 * guarantees that the guest's FPU state is loaded.  This is required to obtain
 * the correct values for leaves whose values depend on the guest values of
 * %xcr0 and the IA32_XSS MSR.
 */
static void
cpuid_apply_runtime_reg_state(struct vm *vm, int vcpuid, uint32_t func,
    uint32_t index, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	uint64_t cr4;
	int error;
	unsigned int regs[4];

	switch (func) {
	case CPUID_0000_0001:
		/*
		 * If CPUID2_XSAVE is being advertised and the
		 * guest has set CR4_XSAVE, set CPUID2_OSXSAVE.
		 */
		*ecx &= ~CPUID2_OSXSAVE;
		if ((*ecx & CPUID2_XSAVE) != 0) {
			error = vm_get_register(vm, vcpuid,
			    VM_REG_GUEST_CR4, &cr4);
			VERIFY0(error);
			if ((cr4 & CR4_XSAVE) != 0) {
				*ecx |= CPUID2_OSXSAVE;
			}
		}

		/*
		 * AMD APM vol. 3 rev. 3.36 section E.3.2 notes that this bit
		 * is set only if the "APIC exists and is enabled."  Vol. 3 of
		 * the June 2024 Intel SDM notes in section 11.4.3 that "[t]he
		 * CPUID feature flag for the APIC ... is also set to 0" when
		 * the APIC enable bit is cleared.
		 */
		if (vlapic_hw_disabled(vm_lapic(vm, vcpuid))) {
			*edx &= ~CPUID_APIC;
		}
		break;

	case CPUID_0000_000D:
		/*
		 * Leaf D reports XSAVE area sizes that vary with the current
		 * value of %xcr0.  Since this function is called with %xcr0
		 * still set to its guest value, the easiest way to get the
		 * correct output is to execute CPUID on the host and copy out
		 * the relevant values.
		 */
		cpuid_count(func, index, regs);
		switch (index) {
		case 0:
			/*
			 * %eax, %ecx, and %edx return information about the
			 * complete set of features the processor supports, not
			 * just the ones that are enabled.  The caller is
			 * presumed to have set these already, so just update
			 * %ebx.
			 */
			*ebx = regs[1];
			break;
		case 1:
			/*
			 * Subleaf 1 reports the XSAVE area size required for
			 * features enabled in %xcr0 and the IA32_XSS MSR via
			 * %ebx.  As with subleaf 0, the caller is presumed to
			 * have set the other three output register values
			 * already.
			 *
			 * AMD APM vol. 3 rev. 3.36 and the June 2024 edition of
			 * volume 2 of the Intel SDM specify slightly different
			 * behavior here: the SDM says that the value returned
			 * in %ebx depends in part on whether %eax advertises
			 * XSAVEC and IA32_XSS support, but the APM does not.
			 * To handle these cases:
			 *
			 * 1. If the guest isn't a VMX guest, just copy the
			 *    current reported save area size.
			 * 2. If both the XSAVEC and XSAVES bits are clear in
			 *    %eax, return a save area size of 0 in %ebx to
			 *    match the SDM description.
			 * 3. Otherwise, copy the host's reported save area
			 *    size.
			 *
			 * Note that, because XSAVES saves a superset of the
			 * state saved by XSAVEC, it's OK to report the host's
			 * save area size even if the host and guest report
			 * different feature bits in %eax:
			 *
			 * - If the host supports XSAVES and the guest doesn't,
			 *   the reported save area size will be too large, but
			 *   the guest can still use XSAVEC safely.
			 * - If the VM's explicit CPUID values advertise XSAVES
			 *   support, but the host doesn't support XSAVES, the
			 *   host's reported save area size will still be large
			 *   enough for the xcr0-controlled state saved by
			 *   XSAVEC.  The area will be undersized for XSAVES,
			 *   but this is OK because the guest can't execute
			 *   XSAVES anyway (it will #UD).
			 */
			if (!vmm_is_intel()) {
				*ebx = regs[1];
			} else {
				if ((*eax & (CPUID_EXTSTATE_XSAVEC |
				    CPUID_EXTSTATE_XSAVES)) == 0) {
					*ebx = 0;
				} else {
					*ebx = regs[1];
				}
			}
			break;
		default:
			/*
			 * Other subleaves of leaf D report the relative sizes
			 * and offsets of the state required for specific
			 * features in the relevant offset masks.  These don't
			 * depend on the current enabled features (only the
			 * supported ones), so no enabled-feature
			 * specialization is required.
			 */
			break;
		}
		break;
	}
}

/*
 * Emulates the CPUID instruction on the specified vCPU and returns its outputs
 * in the rax/rbx/rcx/rdx variables.
 *
 * This function assumes it is called from within VMRUN(), which guarantees
 * that certain guest state (e.g. FPU state) remains loaded.
 */
void
vcpu_emulate_cpuid(struct vm *vm, int vcpuid, uint64_t *rax, uint64_t *rbx,
    uint64_t *rcx, uint64_t *rdx)
{
	const vcpu_cpuid_config_t *cfg = vm_cpuid_config(vm, vcpuid);
	uint32_t func, index;

	ASSERT3P(rax, !=, NULL);
	ASSERT3P(rbx, !=, NULL);
	ASSERT3P(rcx, !=, NULL);
	ASSERT3P(rdx, !=, NULL);

	uint32_t regs[4] = { *rax, 0, *rcx, 0 };
	func = (uint32_t)*rax;
	index = (uint32_t)*rcx;

	/* Fall back to legacy handling if specified */
	if ((cfg->vcc_flags & VCC_FLAG_LEGACY_HANDLING) != 0) {
		legacy_emulate_cpuid(vm, vcpuid, &regs[0], &regs[1], &regs[2],
		    &regs[3]);
	} else {
		const struct vcpu_cpuid_entry *ent = cpuid_find_entry(cfg,
		    func, index);
		ASSERT(ent != NULL);

		/*
		 * The function and index in the found entry may differ from
		 * what the guest requested (if the entry was chosen via the
		 * "highest leaf" fallback described above).  Use the values
		 * from the entry to ensure that the correct vCPU state fixups
		 * get applied below.
		 *
		 * The found entry may also be an all-zero empty entry (if the
		 * requested leaf is invalid but is less than the maximum valid
		 * leaf).  It's OK to fall through in this case because leaf 0
		 * never has any CPU state-based fixups to apply.
		 */
		func = ent->vce_function;
		index = ent->vce_index;
		regs[0] = ent->vce_eax;
		regs[1] = ent->vce_ebx;
		regs[2] = ent->vce_ecx;
		regs[3] = ent->vce_edx;
	}

	/* Fix up any returned values that vary with guest register state. */
	cpuid_apply_runtime_reg_state(vm, vcpuid, func, index, &regs[0],
	    &regs[1], &regs[2], &regs[3]);

	/* CPUID clears the upper 32-bits of the long-mode registers. */
	*rax = regs[0];
	*rbx = regs[1];
	*rcx = regs[2];
	*rdx = regs[3];
}

/*
 * Get the current CPUID emulation configuration for this vCPU.
 *
 * Only the existing flags will be emitted if the vCPU is configured for legacy
 * operation via the VCC_FLAG_LEGACY_HANDLING flag.  If in userspace-controlled
 * mode, then we will attempt to copy the existing entries into vcc_entries,
 * its size specified by vcc_nent.
 *
 * Regardless of whether vcc_entries is adequately sized (or even present),
 * vcc_nent will be set to the number of existing entries.
 */
int
vm_get_cpuid(struct vm *vm, int vcpuid, vcpu_cpuid_config_t *res)
{
	if (vcpuid < 0 || vcpuid > VM_MAXCPU) {
		return (EINVAL);
	}

	const vcpu_cpuid_config_t *src = vm_cpuid_config(vm, vcpuid);
	if (src->vcc_nent > res->vcc_nent) {
		res->vcc_nent = src->vcc_nent;
		return (E2BIG);
	} else if (src->vcc_nent != 0) {
		bcopy(src->vcc_entries, res->vcc_entries,
		    src->vcc_nent * sizeof (struct vcpu_cpuid_entry));
	}
	res->vcc_flags = src->vcc_flags;
	res->vcc_nent = src->vcc_nent;
	return (0);
}
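
/*
 * A caller which does not know the entry count up front can use the E2BIG
 * protocol above to size its buffer.  A minimal sketch (error handling
 * abbreviated, in-kernel allocation assumed):
 *
 *	vcpu_cpuid_config_t res = { .vcc_nent = 0, .vcc_entries = NULL };
 *
 *	if (vm_get_cpuid(vm, vcpuid, &res) == E2BIG) {
 *		res.vcc_entries = kmem_alloc(res.vcc_nent *
 *		    sizeof (struct vcpu_cpuid_entry), KM_SLEEP);
 *		VERIFY0(vm_get_cpuid(vm, vcpuid, &res));
 *	}
 */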

/*
 * Set the CPUID emulation configuration for this vCPU.
 *
 * If VCC_FLAG_LEGACY_HANDLING is set in vcc_flags, then vcc_nent is expected
 * to be set to 0, as configuring a list of entries would be useless when using
 * legacy handling.
 *
 * Any existing entries which are configured are freed, and the newly provided
 * ones will be copied into their place.
 */
int
vm_set_cpuid(struct vm *vm, int vcpuid, const vcpu_cpuid_config_t *src)
{
	if (vcpuid < 0 || vcpuid > VM_MAXCPU) {
		return (EINVAL);
	}
	if (src->vcc_nent > VMM_MAX_CPUID_ENTRIES) {
		return (EINVAL);
	}
	if ((src->vcc_flags & ~VCC_FLAGS_VALID) != 0) {
		return (EINVAL);
	}
	if ((src->vcc_flags & VCC_FLAG_LEGACY_HANDLING) != 0 &&
	    src->vcc_nent != 0) {
		/* No entries should be provided if using legacy handling */
		return (EINVAL);
	}
	for (uint_t i = 0; i < src->vcc_nent; i++) {
		/* Ensure all entries carry valid flags */
		if ((src->vcc_entries[i].vce_flags & ~VCE_FLAGS_VALID) != 0) {
			return (EINVAL);
		}
	}

	vcpu_cpuid_config_t *cfg = vm_cpuid_config(vm, vcpuid);

	/* Free any existing entries first */
	vcpu_cpuid_cleanup(cfg);

	/* Copy supplied entries into freshly allocated space */
	if (src->vcc_nent != 0) {
		const size_t entries_sz =
		    src->vcc_nent * sizeof (struct vcpu_cpuid_entry);

		cfg->vcc_nent = src->vcc_nent;
		cfg->vcc_entries = kmem_alloc(entries_sz, KM_SLEEP);
		bcopy(src->vcc_entries, cfg->vcc_entries, entries_sz);
	}
	cfg->vcc_flags = src->vcc_flags;

	return (0);
}
494
495 void
vcpu_cpuid_init(vcpu_cpuid_config_t * cfg)496 vcpu_cpuid_init(vcpu_cpuid_config_t *cfg)
497 {
498 /* Default to legacy-style handling */
499 cfg->vcc_flags = VCC_FLAG_LEGACY_HANDLING;
500 cfg->vcc_nent = 0;
501 cfg->vcc_entries = NULL;
502 }
503
504 void
vcpu_cpuid_cleanup(vcpu_cpuid_config_t * cfg)505 vcpu_cpuid_cleanup(vcpu_cpuid_config_t *cfg)
506 {
507 if (cfg->vcc_nent != 0) {
508 ASSERT3P(cfg->vcc_entries, !=, NULL);
509
510 kmem_free(cfg->vcc_entries,
511 cfg->vcc_nent * sizeof (struct vcpu_cpuid_entry));
512
513 cfg->vcc_nent = 0;
514 cfg->vcc_entries = NULL;
515 }
516 }
517
518 static const char bhyve_id[12] = "bhyve bhyve ";
519
520 /*
521 * Force exposition of the invariant TSC capability, regardless of whether the
522 * host CPU reports having it.
523 */
524 static int vmm_force_invariant_tsc = 0;
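
/*
 * If desired, this can presumably be tuned like any other module global, e.g.
 * via /etc/system (assuming the module is delivered as "vmm"):
 *
 *	set vmm:vmm_force_invariant_tsc = 1
 */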

/*
 * CPUID instruction Fn0000_0001:
 */
#define	CPUID_0000_0001_APICID_SHIFT	24


/*
 * Compute ceil(log2(x)).  Returns -1 if x is zero.
 */
static __inline int
log2(uint_t x)
{
	return (x == 0 ? -1 : fls(x - 1));
}
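
/*
 * For example: log2(1) == 0, log2(2) == 1, log2(3) == 2, log2(8) == 3, and
 * log2(9) == 4, since fls() returns the 1-based index of the most significant
 * bit set in (x - 1) and fls(0) is 0.
 */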

/*
 * The "legacy" bhyve cpuid emulation, which largely applies statically defined
 * masks to the data provided by the host CPU.
 */
void
legacy_emulate_cpuid(struct vm *vm, int vcpu_id, uint32_t *eax, uint32_t *ebx,
    uint32_t *ecx, uint32_t *edx)
{
	const struct xsave_limits *limits;
	int error, enable_invpcid, level, width = 0, x2apic_id = 0;
	unsigned int func, regs[4], logical_cpus = 0, param;
	enum x2apic_state x2apic_state;
	uint16_t cores, maxcpus, sockets, threads;

	/*
	 * The function of CPUID is controlled through the provided value of
	 * %eax (and secondarily %ecx, for certain leaf data).
	 */
	func = (uint32_t)*eax;
	param = (uint32_t)*ecx;

	/*
	 * Requests for invalid CPUID levels should map to the highest
	 * available level instead.
	 */
	if (cpu_exthigh != 0 && func >= 0x80000000) {
		if (func > cpu_exthigh)
			func = cpu_exthigh;
	} else if (func >= 0x40000000) {
		if (func > CPUID_VM_HIGH)
			func = CPUID_VM_HIGH;
	} else if (func > cpu_high) {
		func = cpu_high;
	}

	/*
	 * In general the approach used for CPU topology is to
	 * advertise a flat topology where all CPUs are packages with
	 * no multi-core or SMT.
	 */
	switch (func) {
	/*
	 * Pass these through to the guest
	 */
	case CPUID_0000_0000:
	case CPUID_0000_0002:
	case CPUID_0000_0003:
	case CPUID_8000_0000:
	case CPUID_8000_0002:
	case CPUID_8000_0003:
	case CPUID_8000_0004:
	case CPUID_8000_0006:
		cpuid_count(func, param, regs);
		break;
	case CPUID_8000_0008:
		cpuid_count(func, param, regs);
		if (vmm_is_svm()) {
			/*
			 * As on Intel (0000_0007:0, EDX), mask out
			 * unsupported or unsafe AMD extended features
			 * (8000_0008 EBX).
			 */
			regs[1] &= (AMDFEID_CLZERO | AMDFEID_IRPERF |
			    AMDFEID_XSAVEERPTR);

			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			/*
			 * Here, width is ApicIdCoreIdSize, present on
			 * at least Family 15h and newer.  It
			 * represents the "number of bits in the
			 * initial apicid that indicate thread id
			 * within a package."
			 *
			 * Our topo_probe_amd() uses it for
			 * pkg_id_shift and other OSes may rely on it.
			 */
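			/*
			 * For example, a guest topology of 4 cores with 2
			 * threads each gives log2(8) == 3, below the 0x4
			 * floor applied below, so a width of 0 is reported;
			 * a package with 16 or more logical CPUs would
			 * instead report the computed width.
			 */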
			width = MIN(0xF, log2(threads * cores));
			if (width < 0x4)
				width = 0;
			logical_cpus = MIN(0xFF, threads * cores - 1);
			regs[2] = (width << AMDID_COREID_SIZE_SHIFT) |
			    logical_cpus;
		}
		break;

	case CPUID_8000_0001:
		cpuid_count(func, param, regs);

		/*
		 * Hide SVM from guest.
		 */
		regs[2] &= ~AMDID2_SVM;

		/*
		 * Don't advertise extended performance counter MSRs
		 * to the guest.
		 */
		regs[2] &= ~AMDID2_PCXC;
		regs[2] &= ~AMDID2_PNXC;
		regs[2] &= ~AMDID2_PTSCEL2I;

		/*
		 * Don't advertise Instruction Based Sampling feature.
		 */
		regs[2] &= ~AMDID2_IBS;

		/* NodeID MSR not available */
		regs[2] &= ~AMDID2_NODE_ID;

		/* Don't advertise the OS visible workaround feature */
		regs[2] &= ~AMDID2_OSVW;

		/* Hide mwaitx/monitorx capability from the guest */
		regs[2] &= ~AMDID2_MWAITX;

#ifndef __FreeBSD__
		/*
		 * Detection routines for TCE and FFXSR are missing
		 * from our vm_cpuid_capability() detection logic
		 * today.  Mask them out until that is remedied.
		 * They do not appear to be in common usage, so their
		 * absence should not cause undue trouble.
		 */
		regs[2] &= ~AMDID2_TCE;
		regs[3] &= ~AMDID_FFXSR;
#endif

		/*
		 * Hide rdtscp/ia32_tsc_aux until we know how
		 * to deal with them.
		 */
		regs[3] &= ~AMDID_RDTSCP;
		break;

	case CPUID_8000_0007:
		cpuid_count(func, param, regs);
		/*
		 * AMD uses this leaf to advertise the processor's
		 * power monitoring and RAS capabilities.  These
		 * features are hardware-specific and exposing
		 * them to a guest doesn't make a lot of sense.
		 *
		 * Intel uses this leaf only to advertise the
		 * "Invariant TSC" feature with all other bits
		 * being reserved (set to zero).
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;

		/*
		 * If the host system possesses an invariant TSC, then
		 * it is safe to expose to the guest.
		 *
		 * If there is measured skew between host TSCs, it will
		 * be properly offset so guests do not observe any
		 * change between CPU migrations.
		 */
		regs[3] &= AMDPM_TSC_INVARIANT;

		/*
		 * Since illumos avoids deep C-states on CPUs which do
		 * not support an invariant TSC, it may be safe (and
		 * desired) to unconditionally expose that capability to
		 * the guest.
		 */
		if (vmm_force_invariant_tsc != 0) {
			regs[3] |= AMDPM_TSC_INVARIANT;
		}
		break;

	case CPUID_8000_001D:
		/* AMD Cache topology, like 0000_0004 for Intel. */
		if (!vmm_is_svm())
			goto default_leaf;

		/*
		 * Similar to Intel, generate a fictitious cache
		 * topology for the guest with L3 shared by the
		 * package, and L1 and L2 local to a core.
		 */
		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		switch (param) {
		case 0:
			logical_cpus = threads;
			level = 1;
			func = 1;	/* data cache */
			break;
		case 1:
			logical_cpus = threads;
			level = 2;
			func = 3;	/* unified cache */
			break;
		case 2:
			logical_cpus = threads * cores;
			level = 3;
			func = 3;	/* unified cache */
			break;
		default:
			logical_cpus = 0;
			level = 0;
			func = 0;
			break;
		}

		if (level == 0) {
			regs[0] = 0;
			regs[1] = 0;
		} else {
			logical_cpus = MIN(0xfff, logical_cpus - 1);
			regs[0] = (logical_cpus << 14) | (1 << 8) |
			    (level << 5) | func;
			regs[1] = func > 0 ? _CACHE_LINE_SIZE - 1 : 0;
		}
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_8000_001E:
		/*
		 * AMD Family 16h+ and Hygon Family 18h additional
		 * identifiers.
		 */
		if (!vmm_is_svm() || CPUID_TO_FAMILY(cpu_id) < 0x16)
			goto default_leaf;

		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		regs[0] = vcpu_id;
		threads = MIN(0xFF, threads - 1);
		regs[1] = (threads << 8) |
		    (vcpu_id >> log2(threads + 1));
		/*
		 * XXX Bhyve topology cannot yet represent >1 node per
		 * processor.
		 */
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_0001:
		do_cpuid(1, regs);

		error = vm_get_x2apic_state(vm, vcpu_id, &x2apic_state);
		VERIFY0(error);

		/*
		 * Override the APIC ID only in ebx
		 */
		regs[1] &= ~(CPUID_LOCAL_APIC_ID);
		regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);

		/*
		 * Don't expose VMX, SpeedStep, TM2 or SMX capability.
		 * Advertise x2APIC capability and Hypervisor guest.
		 */
		regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
		regs[2] &= ~(CPUID2_SMX);

		regs[2] |= CPUID2_HV;

		if (x2apic_state != X2APIC_DISABLED)
			regs[2] |= CPUID2_X2APIC;
		else
			regs[2] &= ~CPUID2_X2APIC;

		/*
		 * Only advertise CPUID2_XSAVE in the guest if
		 * the host is using XSAVE.
		 */
		if (!(regs[2] & CPUID2_OSXSAVE))
			regs[2] &= ~CPUID2_XSAVE;

		/*
		 * Hide monitor/mwait until we know how to deal with
		 * these instructions.
		 */
		regs[2] &= ~CPUID2_MON;

		/*
		 * Hide the performance and debug features.
		 */
		regs[2] &= ~CPUID2_PDCM;

		/*
		 * No TSC deadline support in the APIC yet
		 */
		regs[2] &= ~CPUID2_TSCDLT;

		/*
		 * Hide thermal monitoring
		 */
		regs[3] &= ~(CPUID_ACPI | CPUID_TM);

		/*
		 * Hide the debug store capability.
		 */
		regs[3] &= ~CPUID_DS;

		/*
		 * Advertise the Machine Check and MTRR capability.
		 *
		 * Some guest OSes (e.g. Windows) will not boot if
		 * these features are absent.
		 */
		regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);

		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		logical_cpus = threads * cores;
		regs[1] &= ~CPUID_HTT_CORES;
		regs[1] |= (logical_cpus & 0xff) << 16;
		regs[3] |= CPUID_HTT;
		break;

	case CPUID_0000_0004:
		cpuid_count(func, param, regs);

		if (regs[0] || regs[1] || regs[2] || regs[3]) {
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			regs[0] &= 0x3ff;
			regs[0] |= (cores - 1) << 26;
			/*
			 * Cache topology:
			 * - L1 and L2 are shared only by the logical
			 *   processors in a single core.
			 * - L3 and above are shared by all logical
			 *   processors in the package.
			 */
			logical_cpus = threads;
			level = (regs[0] >> 5) & 0x7;
			if (level >= 3)
				logical_cpus *= cores;
			regs[0] |= (logical_cpus - 1) << 14;
		}
		break;

	case CPUID_0000_0007:
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/* leaf 0 */
		if (param == 0) {
			cpuid_count(func, param, regs);

			/* Only leaf 0 is supported */
			regs[0] = 0;

			/*
			 * Expose known-safe features.
			 */
			regs[1] &= CPUID_STDEXT_FSGSBASE |
			    CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
			    CPUID_STDEXT_AVX2 | CPUID_STDEXT_SMEP |
			    CPUID_STDEXT_BMI2 |
			    CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
			    CPUID_STDEXT_AVX512F |
			    CPUID_STDEXT_AVX512DQ |
			    CPUID_STDEXT_RDSEED |
			    CPUID_STDEXT_SMAP |
			    CPUID_STDEXT_AVX512PF |
			    CPUID_STDEXT_AVX512ER |
			    CPUID_STDEXT_AVX512CD | CPUID_STDEXT_SHA |
			    CPUID_STDEXT_AVX512BW |
			    CPUID_STDEXT_AVX512VL;
			regs[2] &= CPUID_STDEXT2_VAES |
			    CPUID_STDEXT2_VPCLMULQDQ;
			regs[3] &= CPUID_STDEXT3_MD_CLEAR;

			/* Advertise INVPCID if it is enabled. */
			error = vm_get_capability(vm, vcpu_id,
			    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
			if (error == 0 && enable_invpcid)
				regs[1] |= CPUID_STDEXT_INVPCID;
		}
		break;

	case CPUID_0000_0006:
		regs[0] = CPUTPM1_ARAT;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000A:
		/*
		 * Handle the access, but report 0 for
		 * all options
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000B:
		/*
		 * Intel processor topology enumeration
		 */
		if (vmm_is_intel()) {
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			if (param == 0) {
				logical_cpus = threads;
				width = log2(logical_cpus);
				level = CPUID_TYPE_SMT;
				x2apic_id = vcpu_id;
			}

			if (param == 1) {
				logical_cpus = threads * cores;
				width = log2(logical_cpus);
				level = CPUID_TYPE_CORE;
				x2apic_id = vcpu_id;
			}

			if (param >= 2) {
				width = 0;
				logical_cpus = 0;
				level = 0;
				x2apic_id = 0;
			}

			regs[0] = width & 0x1f;
			regs[1] = logical_cpus & 0xffff;
			regs[2] = (level << 8) | (param & 0xff);
			regs[3] = x2apic_id;
		} else {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
		}
		break;

	case CPUID_0000_000D:
		limits = vmm_get_xsave_limits();
		if (!limits->xsave_enabled) {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		}

		cpuid_count(func, param, regs);
		switch (param) {
		case 0:
			/*
			 * Only permit the guest to use bits
			 * that are active in the host in
			 * %xcr0.  Also, claim that the
			 * maximum save area size is
			 * equivalent to the host's current
			 * save area size.  Since this runs
			 * "inside" of vmrun(), it runs with
			 * the guest's xcr0, so the current
			 * save area size is correct as-is.
			 */
			regs[0] &= limits->xcr0_allowed;
			regs[2] = limits->xsave_max_size;
			regs[3] &= (limits->xcr0_allowed >> 32);
			break;
		case 1:
			/* Only permit XSAVEOPT. */
			regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		default:
			/*
			 * If the leaf is for a permitted feature,
			 * pass through as-is, otherwise return
			 * all zeroes.
			 */
			if (!(limits->xcr0_allowed & (1ul << param))) {
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
			}
			break;
		}
		break;

	case CPUID_0000_000F:
	case CPUID_0000_0010:
		/*
		 * Do not report any Resource Director Technology
		 * capabilities.  Exposing control of cache or memory
		 * controller resource partitioning to the guest is not
		 * at all sensible.
		 *
		 * This is already hidden at a high level by masking of
		 * leaf 0x7.  Even still, a guest may look here for
		 * detailed capability information.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_0015:
		/*
		 * Don't report CPU TSC/Crystal ratio and clock
		 * values since guests may use these to derive the
		 * local APIC frequency.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case 0x40000000:
		regs[0] = CPUID_VM_HIGH;
		bcopy(bhyve_id, &regs[1], 4);
		bcopy(bhyve_id + 4, &regs[2], 4);
		bcopy(bhyve_id + 8, &regs[3], 4);
		break;

	default:
	default_leaf:
		/*
		 * The leaf value has already been clamped so
		 * simply pass this through.
		 */
		cpuid_count(func, param, regs);
		break;
	}

	*eax = regs[0];
	*ebx = regs[1];
	*ecx = regs[2];
	*edx = regs[3];
}