/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>

#include <dev/vmm/vmm_ktr.h>

#include "vmm_host.h"
#include "vmm_util.h"
#include "x86.h"

SYSCTL_DECL(_hw_vmm);
static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    NULL);

#define	CPUID_VM_SIGNATURE	0x40000000
#define	CPUID_BHYVE_FEATURES	0x40000001
#define	CPUID_VM_HIGH		CPUID_BHYVE_FEATURES

/* Features advertised in CPUID_BHYVE_FEATURES %eax */
#define	CPUID_BHYVE_FEAT_EXT_DEST_ID	(1UL << 0)	/* MSI Extended Dest ID */
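
/*
 * CPUID leaves at 0x40000000 and above are not implemented by physical
 * CPUs, so a hypervisor can claim them to advertise its presence; bhyve
 * uses this range for its signature and feature leaves.
 */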

static const char bhyve_id[12] = "bhyve bhyve ";

static uint64_t bhyve_xcpuids;
SYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0,
    "Number of times an unknown cpuid leaf was accessed");

static int cpuid_leaf_b = 1;
SYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
    &cpuid_leaf_b, 0, NULL);

/*
 * Compute ceil(log2(x)).  Returns -1 if x is zero.
 */
static __inline int
log2(u_int x)
{

	return (x == 0 ? -1 : order_base_2(x));
}

int
x86_emulate_cpuid(struct vcpu *vcpu, uint64_t *rax, uint64_t *rbx,
    uint64_t *rcx, uint64_t *rdx)
{
	struct vm *vm = vcpu_vm(vcpu);
	int vcpu_id = vcpu_vcpuid(vcpu);
	const struct xsave_limits *limits;
	uint64_t cr4;
	int error, enable_invpcid, enable_rdpid, enable_rdtscp, level,
	    width, x2apic_id;
	unsigned int func, regs[4], logical_cpus, param;
	enum x2apic_state x2apic_state;
	uint16_t cores, maxcpus, sockets, threads;

	/*
	 * The function of CPUID is controlled through the provided value of
	 * %eax (and secondarily %ecx, for certain leaf data).
	 */
	func = (uint32_t)*rax;
	param = (uint32_t)*rcx;

	VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", func, param);

	/*
	 * Requests for invalid CPUID levels should map to the highest
	 * available level instead.
	 */
	if (cpu_exthigh != 0 && func >= 0x80000000) {
		if (func > cpu_exthigh)
			func = cpu_exthigh;
	} else if (func >= CPUID_VM_SIGNATURE) {
		if (func > CPUID_VM_HIGH)
			func = CPUID_VM_HIGH;
	} else if (func > cpu_high) {
		func = cpu_high;
	}

	/*
	 * In general the approach used for CPU topology is to
	 * advertise a flat topology where all CPUs are packages with
	 * no multi-core or SMT.
	 */
	switch (func) {
	/*
	 * Pass these through to the guest
	 */
	case CPUID_0000_0000:
	case CPUID_0000_0002:
	case CPUID_0000_0003:
	case CPUID_8000_0000:
	case CPUID_8000_0002:
	case CPUID_8000_0003:
	case CPUID_8000_0004:
	case CPUID_8000_0006:
		cpuid_count(func, param, regs);
		break;
	case CPUID_8000_0008:
		cpuid_count(func, param, regs);
		if (vmm_is_svm()) {
			/*
			 * As on Intel (0000_0007:0, EDX), mask out
			 * unsupported or unsafe AMD extended features
			 * (8000_0008 EBX).
			 */
			regs[1] &= (AMDFEID_CLZERO | AMDFEID_IRPERF |
			    AMDFEID_XSAVEERPTR);

			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			/*
			 * Here, width is ApicIdCoreIdSize, present on
			 * at least Family 15h and newer.  It
			 * represents the "number of bits in the
			 * initial apicid that indicate thread id
			 * within a package."
			 *
			 * Our topo_probe_amd() uses it for
			 * pkg_id_shift and other OSes may rely on it.
			 */
			width = MIN(0xF, log2(threads * cores));
			logical_cpus = MIN(0xFF, threads * cores - 1);
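			/*
			 * Pack %ecx as AMD defines it: ApicIdCoreIdSize
			 * in bits 15:12 and NC (threads in the package,
			 * minus one) in bits 7:0.
			 */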
			regs[2] = (width << AMDID_COREID_SIZE_SHIFT) |
			    logical_cpus;
		}
		break;

	case CPUID_8000_0001:
		cpuid_count(func, param, regs);

		/*
		 * Hide SVM from guest.
		 */
		regs[2] &= ~AMDID2_SVM;

		/*
		 * Don't advertise extended performance counter MSRs
		 * to the guest.
		 */
		regs[2] &= ~AMDID2_PCXC;
		regs[2] &= ~AMDID2_PNXC;
		regs[2] &= ~AMDID2_PTSCEL2I;

		/*
		 * Don't advertise Instruction Based Sampling feature.
		 */
		regs[2] &= ~AMDID2_IBS;

		/* NodeID MSR not available */
		regs[2] &= ~AMDID2_NODE_ID;

		/* Don't advertise the OS visible workaround feature */
		regs[2] &= ~AMDID2_OSVW;

		/* Hide mwaitx/monitorx capability from the guest */
		regs[2] &= ~AMDID2_MWAITX;

		/* Advertise RDTSCP if it is enabled. */
		error = vm_get_capability(vcpu,
		    VM_CAP_RDTSCP, &enable_rdtscp);
		if (error == 0 && enable_rdtscp)
			regs[3] |= AMDID_RDTSCP;
		else
			regs[3] &= ~AMDID_RDTSCP;
		break;

	case CPUID_8000_0007:
		/*
		 * AMD uses this leaf to advertise the processor's
		 * power monitoring and RAS capabilities.  These
		 * features are hardware-specific and exposing
		 * them to a guest doesn't make a lot of sense.
		 *
		 * Intel uses this leaf only to advertise the
		 * "Invariant TSC" feature with all other bits
		 * being reserved (set to zero).
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/*
		 * "Invariant TSC" can be advertised to the guest if:
		 * - host TSC frequency is invariant
		 * - host TSCs are synchronized across physical cpus
		 *
		 * XXX This still falls short because the vcpu
		 * can observe the TSC moving backwards as it
		 * migrates across physical cpus.  But at least
		 * it should discourage the guest from using the
		 * TSC to keep track of time.
		 */
		if (tsc_is_invariant && smp_tsc)
			regs[3] |= AMDPM_TSC_INVARIANT;
		break;

	case CPUID_8000_001D:
		/* AMD Cache topology, like 0000_0004 for Intel. */
		if (!vmm_is_svm())
			goto default_leaf;

		/*
		 * Similar to Intel, generate a fictitious cache
		 * topology for the guest with L3 shared by the
		 * package, and L1 and L2 local to a core.
		 */
		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		switch (param) {
		case 0:
			logical_cpus = threads;
			level = 1;
			func = 1;	/* data cache */
			break;
		case 1:
			logical_cpus = threads;
			level = 2;
			func = 3;	/* unified cache */
			break;
		case 2:
			logical_cpus = threads * cores;
			level = 3;
			func = 3;	/* unified cache */
			break;
		default:
			logical_cpus = sockets * threads * cores;
			level = 0;
			func = 0;
			break;
		}

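		/*
		 * %eax mirrors the hardware layout: logical CPUs sharing
		 * the cache (minus one) in bits 25:14, "self initializing"
		 * in bit 8, cache level in bits 7:5 and cache type in
		 * bits 4:0.
		 */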
		logical_cpus = MIN(0xfff, logical_cpus - 1);
		regs[0] = (logical_cpus << 14) | (1 << 8) |
		    (level << 5) | func;
		regs[1] = (func > 0) ? (CACHE_LINE_SIZE - 1) : 0;

		/*
		 * ecx: Number of cache ways for non-fully
		 * associative cache, minus 1.  Reported value
		 * of zero means there is one way.
		 */
		regs[2] = 0;

		regs[3] = 0;
		break;

	case CPUID_8000_001E:
		/*
		 * AMD Family 16h+ and Hygon Family 18h additional
		 * identifiers.
		 */
		if (!vmm_is_svm() || CPUID_TO_FAMILY(cpu_id) < 0x16)
			goto default_leaf;

		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
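		/*
		 * %eax is the extended APIC ID; %ebx carries threads per
		 * core (minus one) in bits 15:8 and the core ID in
		 * bits 7:0.
		 */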
		regs[0] = vcpu_id;
		threads = MIN(0xFF, threads - 1);
		regs[1] = (threads << 8) |
		    (vcpu_id >> log2(threads + 1));
		/*
		 * XXX Bhyve topology cannot yet represent >1 node per
		 * processor.
		 */
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_0001:
		do_cpuid(1, regs);

		error = vm_get_x2apic_state(vcpu, &x2apic_state);
		if (error) {
			panic("x86_emulate_cpuid: error %d "
			    "fetching x2apic state", error);
		}

		/*
		 * Override the APIC ID only in ebx
		 */
		regs[1] &= ~(CPUID_LOCAL_APIC_ID);
		regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);

		/*
		 * Don't expose VMX, SpeedStep, TME or SMX capability.
		 * Advertise x2APIC capability and Hypervisor guest.
		 */
		regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
		regs[2] &= ~(CPUID2_SMX);

		regs[2] |= CPUID2_HV;

		if (x2apic_state != X2APIC_DISABLED)
			regs[2] |= CPUID2_X2APIC;
		else
			regs[2] &= ~CPUID2_X2APIC;

		/*
		 * Only advertise CPUID2_XSAVE in the guest if
		 * the host is using XSAVE.
		 */
		if (!(regs[2] & CPUID2_OSXSAVE))
			regs[2] &= ~CPUID2_XSAVE;

		/*
		 * If CPUID2_XSAVE is being advertised and the
		 * guest has set CR4_XSAVE, set
		 * CPUID2_OSXSAVE.
		 */
		regs[2] &= ~CPUID2_OSXSAVE;
		if (regs[2] & CPUID2_XSAVE) {
			error = vm_get_register(vcpu,
			    VM_REG_GUEST_CR4, &cr4);
			if (error)
				panic("x86_emulate_cpuid: error %d "
				    "fetching %%cr4", error);
			if (cr4 & CR4_XSAVE)
				regs[2] |= CPUID2_OSXSAVE;
		}

		/*
		 * Hide monitor/mwait until we know how to deal with
		 * these instructions.
		 */
		regs[2] &= ~CPUID2_MON;

		/*
		 * Hide the performance and debug features.
		 */
		regs[2] &= ~CPUID2_PDCM;

		/*
		 * No TSC deadline support in the APIC yet
		 */
		regs[2] &= ~CPUID2_TSCDLT;

		/*
		 * Hide thermal monitoring
		 */
		regs[3] &= ~(CPUID_ACPI | CPUID_TM);

		/*
		 * Hide the debug store capability.
		 */
		regs[3] &= ~CPUID_DS;

		/*
		 * Advertise the Machine Check and MTRR capability.
		 *
		 * Some guest OSes (e.g. Windows) will not boot if
		 * these features are absent.
		 */
		regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);

		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
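		/*
		 * Logical CPUs per package are reported in %ebx bits
		 * 23:16; CPUID_HTT must be set for the guest to consider
		 * the field valid.
		 */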
		logical_cpus = threads * cores;
		regs[1] &= ~CPUID_HTT_CORES;
		regs[1] |= (logical_cpus & 0xff) << 16;
		regs[3] |= CPUID_HTT;
		break;

	case CPUID_0000_0004:
		cpuid_count(func, param, regs);

		if (regs[0] || regs[1] || regs[2] || regs[3]) {
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
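			/*
			 * %eax[31:26] encodes addressable cores per
			 * package (minus one); %eax[25:14], filled in
			 * below, encodes logical CPUs sharing this
			 * cache (minus one).
			 */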
			regs[0] &= 0x3ff;
			regs[0] |= (cores - 1) << 26;
			/*
			 * Cache topology:
			 * - L1 and L2 are shared only by the logical
			 *   processors in a single core.
			 * - L3 and above are shared by all logical
			 *   processors in the package.
			 */
			logical_cpus = threads;
			level = (regs[0] >> 5) & 0x7;
			if (level >= 3)
				logical_cpus *= cores;
			regs[0] |= (logical_cpus - 1) << 14;
		}
		break;

	case CPUID_0000_0007:
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/* leaf 0 */
		if (param == 0) {
			cpuid_count(func, param, regs);

			/* Only leaf 0 is supported */
			regs[0] = 0;

			/*
			 * Expose known-safe features.
			 */
			regs[1] &= CPUID_STDEXT_FSGSBASE |
			    CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
			    CPUID_STDEXT_AVX2 | CPUID_STDEXT_SMEP |
			    CPUID_STDEXT_BMI2 |
			    CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
			    CPUID_STDEXT_AVX512F |
			    CPUID_STDEXT_AVX512DQ |
			    CPUID_STDEXT_RDSEED |
			    CPUID_STDEXT_SMAP |
			    CPUID_STDEXT_AVX512PF |
			    CPUID_STDEXT_AVX512ER |
			    CPUID_STDEXT_AVX512CD | CPUID_STDEXT_SHA |
			    CPUID_STDEXT_AVX512BW |
			    CPUID_STDEXT_AVX512VL;
			regs[2] &= CPUID_STDEXT2_VAES |
			    CPUID_STDEXT2_VPCLMULQDQ;
			regs[3] &= CPUID_STDEXT3_MD_CLEAR;

			/* Advertise RDPID if it is enabled. */
			error = vm_get_capability(vcpu, VM_CAP_RDPID,
			    &enable_rdpid);
			if (error == 0 && enable_rdpid)
				regs[2] |= CPUID_STDEXT2_RDPID;

			/* Advertise INVPCID if it is enabled. */
			error = vm_get_capability(vcpu,
			    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
			if (error == 0 && enable_invpcid)
				regs[1] |= CPUID_STDEXT_INVPCID;
		}
		break;

	case CPUID_0000_0006:
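		/*
		 * Advertise only ARAT (always-running APIC timer) so the
		 * guest does not assume its local APIC timer stops in
		 * deep C-states; report no other power management bits.
		 */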
		regs[0] = CPUTPM1_ARAT;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000A:
		/*
		 * Handle the access, but report 0 for
		 * all options
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000B:
		/*
		 * Intel processor topology enumeration
		 */
		if (vmm_is_intel()) {
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			if (param == 0) {
				logical_cpus = threads;
				width = log2(logical_cpus);
				level = CPUID_TYPE_SMT;
				x2apic_id = vcpu_id;
			}

			if (param == 1) {
				logical_cpus = threads * cores;
				width = log2(logical_cpus);
				level = CPUID_TYPE_CORE;
				x2apic_id = vcpu_id;
			}

			if (!cpuid_leaf_b || param >= 2) {
				width = 0;
				logical_cpus = 0;
				level = 0;
				x2apic_id = 0;
			}

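			/*
			 * Leaf 0xB register layout: %eax[4:0] is the APIC
			 * ID shift to reach the next level, %ebx[15:0] the
			 * number of logical CPUs at this level, %ecx[15:8]
			 * the level type and %edx the vcpu's x2APIC ID.
			 */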
			regs[0] = width & 0x1f;
			regs[1] = logical_cpus & 0xffff;
			regs[2] = (level << 8) | (param & 0xff);
			regs[3] = x2apic_id;
		} else {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
		}
		break;

	case CPUID_0000_000D:
		limits = vmm_get_xsave_limits();
		if (!limits->xsave_enabled) {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		}

		cpuid_count(func, param, regs);
		switch (param) {
		case 0:
			/*
			 * Only permit the guest to use bits
			 * that are active in the host in
			 * %xcr0.  Also, claim that the
			 * maximum save area size is
			 * equivalent to the host's current
			 * save area size.  Since this runs
			 * "inside" of vmrun(), it runs with
			 * the guest's xcr0, so the current
			 * save area size is correct as-is.
			 */
			regs[0] &= limits->xcr0_allowed;
			regs[2] = limits->xsave_max_size;
			regs[3] &= (limits->xcr0_allowed >> 32);
			break;
		case 1:
			/* Only permit XSAVEOPT. */
			regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		default:
			/*
			 * If the leaf is for a permitted feature,
			 * pass through as-is, otherwise return
			 * all zeroes.
			 */
			if (!(limits->xcr0_allowed & (1ul << param))) {
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
			}
			break;
		}
		break;

	case CPUID_0000_000F:
	case CPUID_0000_0010:
		/*
		 * Do not report any Resource Director Technology
		 * capabilities.  Exposing control of cache or memory
		 * controller resource partitioning to the guest is not
		 * at all sensible.
		 *
		 * This is already hidden at a high level by masking of
		 * leaf 0x7.  Even still, a guest may look here for
		 * detailed capability information.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_0015:
		/*
		 * Don't report CPU TSC/Crystal ratio and clock
		 * values since guests may use these to derive the
		 * local APIC frequency.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_VM_SIGNATURE:
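		/*
		 * %eax reports the highest hypervisor leaf; the 12-byte
		 * signature string is returned across %ebx, %ecx and
		 * %edx, the same convention other hypervisors follow.
		 */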
		regs[0] = CPUID_VM_HIGH;
		bcopy(bhyve_id, &regs[1], 4);
		bcopy(bhyve_id + 4, &regs[2], 4);
		bcopy(bhyve_id + 8, &regs[3], 4);
		break;

	case CPUID_BHYVE_FEATURES:
		regs[0] = CPUID_BHYVE_FEAT_EXT_DEST_ID;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	default:
default_leaf:
		/*
		 * The leaf value has already been clamped so
		 * simply pass this through, keeping count of
		 * how many unhandled leaf values have been seen.
		 */
		atomic_add_long(&bhyve_xcpuids, 1);
		cpuid_count(func, param, regs);
		break;
	}

	/*
	 * CPUID clears the upper 32-bits of the long-mode registers.
	 */
	*rax = regs[0];
	*rbx = regs[1];
	*rcx = regs[2];
	*rdx = regs[3];

	return (1);
}

bool
vm_cpuid_capability(struct vcpu *vcpu, enum vm_cpuid_capability cap)
{
	bool rv;

	KASSERT(cap > 0 && cap < VCC_LAST, ("%s: invalid vm_cpu_capability %d",
	    __func__, cap));

	/*
	 * Simply passthrough the capabilities of the host cpu for now.
	 */
	rv = false;
	switch (cap) {
	case VCC_NO_EXECUTE:
		if (amd_feature & AMDID_NX)
			rv = true;
		break;
	case VCC_FFXSR:
		if (amd_feature & AMDID_FFXSR)
			rv = true;
		break;
	case VCC_TCE:
		if (amd_feature2 & AMDID2_TCE)
			rv = true;
		break;
	default:
		panic("%s: unknown vm_cpu_capability %d", __func__, cap);
	}
	return (rv);
}

int
vm_rdmtrr(struct vm_mtrr *mtrr, u_int num, uint64_t *val)
{
	switch (num) {
	case MSR_MTRRcap:
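		/*
		 * MTRRcap layout: variable-range count (VCNT) in bits
		 * 7:0, fixed-range support in bit 8 and write-combining
		 * support in bit 10.
		 */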
		*val = MTRR_CAP_WC | MTRR_CAP_FIXED | VMM_MTRR_VAR_MAX;
		break;
	case MSR_MTRRdefType:
		*val = mtrr->def_type;
		break;
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
		*val = mtrr->fixed4k[num - MSR_MTRR4kBase];
		break;
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
		*val = mtrr->fixed16k[num - MSR_MTRR16kBase];
		break;
	case MSR_MTRR64kBase:
		*val = mtrr->fixed64k;
		break;
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: {
		u_int offset = num - MSR_MTRRVarBase;
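		/*
		 * Variable-range MTRRs are interleaved PHYSBASE/PHYSMASK
		 * MSR pairs: even offsets select the base register, odd
		 * offsets the mask.
		 */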
		if (offset % 2 == 0) {
			*val = mtrr->var[offset / 2].base;
		} else {
			*val = mtrr->var[offset / 2].mask;
		}
		break;
	}
	default:
		return (-1);
	}

	return (0);
}

int
vm_wrmtrr(struct vm_mtrr *mtrr, u_int num, uint64_t val)
{
	switch (num) {
	case MSR_MTRRcap:
		/* MTRRCAP is read only */
		return (-1);
	case MSR_MTRRdefType:
		if (val & ~VMM_MTRR_DEF_MASK) {
			/* generate #GP on writes to reserved fields */
			return (-1);
		}
		mtrr->def_type = val;
		break;
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
		mtrr->fixed4k[num - MSR_MTRR4kBase] = val;
		break;
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
		mtrr->fixed16k[num - MSR_MTRR16kBase] = val;
		break;
	case MSR_MTRR64kBase:
		mtrr->fixed64k = val;
		break;
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: {
		u_int offset = num - MSR_MTRRVarBase;
		if (offset % 2 == 0) {
			if (val & ~VMM_MTRR_PHYSBASE_MASK) {
				/* generate #GP on writes to reserved fields */
				return (-1);
			}
			mtrr->var[offset / 2].base = val;
		} else {
			if (val & ~VMM_MTRR_PHYSMASK_MASK) {
				/* generate #GP on writes to reserved fields */
				return (-1);
			}
			mtrr->var[offset / 2].mask = val;
		}
		break;
	}
	default:
		return (-1);
	}

	return (0);
}