/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include <machine/vmm.h>

#include "vmm_host.h"
#include "vmm_ktr.h"
#include "vmm_util.h"
#include "x86.h"

SYSCTL_DECL(_hw_vmm);
static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    NULL);

#define CPUID_VM_HIGH	0x40000000

static const char bhyve_id[12] = "bhyve bhyve ";

static uint64_t bhyve_xcpuids;
SYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0,
    "Number of times an unknown cpuid leaf was accessed");

static int cpuid_leaf_b = 1;
SYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
    &cpuid_leaf_b, 0, NULL);

/*
 * Round up to the next power of two, if necessary, and then take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
log2(u_int x)
{

        return (fls(x << (1 - powerof2(x))) - 1);
}

int
x86_emulate_cpuid(struct vcpu *vcpu, uint64_t *rax, uint64_t *rbx,
    uint64_t *rcx, uint64_t *rdx)
{
        struct vm *vm = vcpu_vm(vcpu);
        int vcpu_id = vcpu_vcpuid(vcpu);
        const struct xsave_limits *limits;
        uint64_t cr4;
        int error, enable_invpcid, enable_rdpid, enable_rdtscp, level,
            width, x2apic_id;
        unsigned int func, regs[4], logical_cpus, param;
        enum x2apic_state x2apic_state;
        uint16_t cores, maxcpus, sockets, threads;

        /*
         * The function of CPUID is controlled through the provided value of
         * %eax (and secondarily %ecx, for certain leaf data).
         */
        func = (uint32_t)*rax;
        param = (uint32_t)*rcx;

        VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", func, param);

        /*
         * Requests for invalid CPUID levels should map to the highest
         * available level instead.
         */
        if (cpu_exthigh != 0 && func >= 0x80000000) {
                if (func > cpu_exthigh)
                        func = cpu_exthigh;
        } else if (func >= 0x40000000) {
                if (func > CPUID_VM_HIGH)
                        func = CPUID_VM_HIGH;
        } else if (func > cpu_high) {
                func = cpu_high;
        }

        /*
         * In general the approach used for CPU topology is to
         * advertise a flat topology where all CPUs are packages with
         * no multi-core or SMT.
         */
        switch (func) {
                /*
                 * Pass these through to the guest
                 */
        case CPUID_0000_0000:
        case CPUID_0000_0002:
        case CPUID_0000_0003:
        case CPUID_8000_0000:
        case CPUID_8000_0002:
        case CPUID_8000_0003:
        case CPUID_8000_0004:
        case CPUID_8000_0006:
                cpuid_count(func, param, regs);
                break;
        case CPUID_8000_0008:
                cpuid_count(func, param, regs);
                if (vmm_is_svm()) {
                        /*
                         * As on Intel (0000_0007:0, EDX), mask out
                         * unsupported or unsafe AMD extended features
                         * (8000_0008 EBX).
                         */
                        regs[1] &= (AMDFEID_CLZERO | AMDFEID_IRPERF |
                            AMDFEID_XSAVEERPTR);

                        vm_get_topology(vm, &sockets, &cores, &threads,
                            &maxcpus);
                        /*
                         * Here, width is ApicIdCoreIdSize, present on
                         * at least Family 15h and newer.  It
                         * represents the "number of bits in the
                         * initial apicid that indicate thread id
                         * within a package."
                         *
                         * Our topo_probe_amd() uses it for
                         * pkg_id_shift and other OSes may rely on it.
                         */
                        width = MIN(0xF, log2(threads * cores));
                        if (width < 0x4)
                                width = 0;
                        logical_cpus = MIN(0xFF, threads * cores - 1);
                        regs[2] = (width << AMDID_COREID_SIZE_SHIFT) | logical_cpus;
                }
                break;

        case CPUID_8000_0001:
                cpuid_count(func, param, regs);

                /*
                 * Hide SVM from guest.
                 */
                regs[2] &= ~AMDID2_SVM;

                /*
                 * Don't advertise extended performance counter MSRs
                 * to the guest.
                 */
                regs[2] &= ~AMDID2_PCXC;
                regs[2] &= ~AMDID2_PNXC;
                regs[2] &= ~AMDID2_PTSCEL2I;

                /*
                 * Don't advertise Instruction Based Sampling feature.
                 */
                regs[2] &= ~AMDID2_IBS;

                /* NodeID MSR not available */
                regs[2] &= ~AMDID2_NODE_ID;

                /* Don't advertise the OS visible workaround feature */
                regs[2] &= ~AMDID2_OSVW;

                /* Hide mwaitx/monitorx capability from the guest */
                regs[2] &= ~AMDID2_MWAITX;

                /* Advertise RDTSCP if it is enabled. */
                error = vm_get_capability(vcpu,
                    VM_CAP_RDTSCP, &enable_rdtscp);
                if (error == 0 && enable_rdtscp)
                        regs[3] |= AMDID_RDTSCP;
                else
                        regs[3] &= ~AMDID_RDTSCP;
                break;

        case CPUID_8000_0007:
                /*
                 * AMD uses this leaf to advertise the processor's
                 * power monitoring and RAS capabilities.  These
                 * features are hardware-specific and exposing
                 * them to a guest doesn't make a lot of sense.
                 *
                 * Intel uses this leaf only to advertise the
                 * "Invariant TSC" feature with all other bits
                 * being reserved (set to zero).
                 */
                regs[0] = 0;
                regs[1] = 0;
                regs[2] = 0;
                regs[3] = 0;

                /*
                 * "Invariant TSC" can be advertised to the guest if:
                 * - host TSC frequency is invariant
                 * - host TSCs are synchronized across physical cpus
                 *
                 * XXX This still falls short because the vcpu
                 * can observe the TSC moving backwards as it
                 * migrates across physical cpus.  But at least
                 * it should discourage the guest from using the
                 * TSC to keep track of time.
                 */
                if (tsc_is_invariant && smp_tsc)
                        regs[3] |= AMDPM_TSC_INVARIANT;
                break;

        case CPUID_8000_001D:
                /* AMD Cache topology, like 0000_0004 for Intel. */
                if (!vmm_is_svm())
                        goto default_leaf;

                /*
                 * Similar to Intel, generate a fictitious cache
                 * topology for the guest with L3 shared by the
                 * package, and L1 and L2 local to a core.
                 */
                vm_get_topology(vm, &sockets, &cores, &threads,
                    &maxcpus);
                switch (param) {
                case 0:
                        logical_cpus = threads;
                        level = 1;
                        func = 1;	/* data cache */
                        break;
                case 1:
                        logical_cpus = threads;
                        level = 2;
                        func = 3;	/* unified cache */
                        break;
                case 2:
                        logical_cpus = threads * cores;
                        level = 3;
                        func = 3;	/* unified cache */
                        break;
                default:
                        logical_cpus = 0;
                        level = 0;
                        func = 0;
                        break;
                }

                logical_cpus = MIN(0xfff, logical_cpus - 1);
                regs[0] = (logical_cpus << 14) | (1 << 8) |
                    (level << 5) | func;
                regs[1] = (func > 0) ? (CACHE_LINE_SIZE - 1) : 0;
                regs[2] = 0;
                regs[3] = 0;
                break;

        case CPUID_8000_001E:
                /*
                 * AMD Family 16h+ and Hygon Family 18h additional
                 * identifiers.
                 */
                if (!vmm_is_svm() || CPUID_TO_FAMILY(cpu_id) < 0x16)
                        goto default_leaf;

                vm_get_topology(vm, &sockets, &cores, &threads,
                    &maxcpus);
                regs[0] = vcpu_id;
                threads = MIN(0xFF, threads - 1);
                regs[1] = (threads << 8) |
                    (vcpu_id >> log2(threads + 1));
                /*
                 * XXX Bhyve topology cannot yet represent >1 node per
                 * processor.
                 */
                regs[2] = 0;
                regs[3] = 0;
                break;

        case CPUID_0000_0001:
                do_cpuid(1, regs);

                error = vm_get_x2apic_state(vcpu, &x2apic_state);
                if (error) {
                        panic("x86_emulate_cpuid: error %d "
                            "fetching x2apic state", error);
                }

                /*
                 * Override the APIC ID only in ebx
                 */
                regs[1] &= ~(CPUID_LOCAL_APIC_ID);
                regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);

                /*
                 * Don't expose VMX, SpeedStep, TME or SMX capability.
                 * Advertise x2APIC capability and Hypervisor guest.
                 */
                regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
                regs[2] &= ~(CPUID2_SMX);

                regs[2] |= CPUID2_HV;

                if (x2apic_state != X2APIC_DISABLED)
                        regs[2] |= CPUID2_X2APIC;
                else
                        regs[2] &= ~CPUID2_X2APIC;

                /*
                 * Only advertise CPUID2_XSAVE in the guest if
                 * the host is using XSAVE.
                 */
                if (!(regs[2] & CPUID2_OSXSAVE))
                        regs[2] &= ~CPUID2_XSAVE;

                /*
                 * If CPUID2_XSAVE is being advertised and the
                 * guest has set CR4_XSAVE, set
                 * CPUID2_OSXSAVE.
                 */
                regs[2] &= ~CPUID2_OSXSAVE;
                if (regs[2] & CPUID2_XSAVE) {
                        error = vm_get_register(vcpu,
                            VM_REG_GUEST_CR4, &cr4);
                        if (error)
                                panic("x86_emulate_cpuid: error %d "
                                    "fetching %%cr4", error);
                        if (cr4 & CR4_XSAVE)
                                regs[2] |= CPUID2_OSXSAVE;
                }

                /*
                 * Hide monitor/mwait until we know how to deal with
                 * these instructions.
                 */
                regs[2] &= ~CPUID2_MON;

                /*
                 * Hide the performance and debug features.
                 */
                regs[2] &= ~CPUID2_PDCM;

                /*
                 * No TSC deadline support in the APIC yet
                 */
                regs[2] &= ~CPUID2_TSCDLT;

                /*
                 * Hide thermal monitoring
                 */
                regs[3] &= ~(CPUID_ACPI | CPUID_TM);

                /*
                 * Hide the debug store capability.
                 */
                regs[3] &= ~CPUID_DS;

                /*
                 * Advertise the Machine Check and MTRR capability.
                 *
                 * Some guest OSes (e.g. Windows) will not boot if
                 * these features are absent.
                 */
                regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);

                vm_get_topology(vm, &sockets, &cores, &threads,
                    &maxcpus);
                logical_cpus = threads * cores;
                regs[1] &= ~CPUID_HTT_CORES;
                regs[1] |= (logical_cpus & 0xff) << 16;
                regs[3] |= CPUID_HTT;
                break;

        case CPUID_0000_0004:
                cpuid_count(func, param, regs);

                if (regs[0] || regs[1] || regs[2] || regs[3]) {
                        vm_get_topology(vm, &sockets, &cores, &threads,
                            &maxcpus);
                        regs[0] &= 0x3ff;
                        regs[0] |= (cores - 1) << 26;
                        /*
                         * Cache topology:
                         * - L1 and L2 are shared only by the logical
                         *   processors in a single core.
                         * - L3 and above are shared by all logical
                         *   processors in the package.
                         */
                        logical_cpus = threads;
                        level = (regs[0] >> 5) & 0x7;
                        if (level >= 3)
                                logical_cpus *= cores;
                        regs[0] |= (logical_cpus - 1) << 14;
                }
                break;

        case CPUID_0000_0007:
                regs[0] = 0;
                regs[1] = 0;
                regs[2] = 0;
                regs[3] = 0;

                /* leaf 0 */
                if (param == 0) {
                        cpuid_count(func, param, regs);

                        /* Only leaf 0 is supported */
                        regs[0] = 0;

                        /*
                         * Expose known-safe features.
                         */
                        regs[1] &= (CPUID_STDEXT_FSGSBASE |
                            CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
                            CPUID_STDEXT_AVX2 | CPUID_STDEXT_SMEP |
                            CPUID_STDEXT_BMI2 |
                            CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
                            CPUID_STDEXT_AVX512F |
                            CPUID_STDEXT_RDSEED |
                            CPUID_STDEXT_SMAP |
                            CPUID_STDEXT_AVX512PF |
                            CPUID_STDEXT_AVX512ER |
                            CPUID_STDEXT_AVX512CD | CPUID_STDEXT_SHA);
                        regs[2] = 0;
                        regs[3] &= CPUID_STDEXT3_MD_CLEAR;

                        /* Advertise RDPID if it is enabled. */
                        error = vm_get_capability(vcpu, VM_CAP_RDPID,
                            &enable_rdpid);
                        if (error == 0 && enable_rdpid)
                                regs[2] |= CPUID_STDEXT2_RDPID;

                        /* Advertise INVPCID if it is enabled. */
                        error = vm_get_capability(vcpu,
                            VM_CAP_ENABLE_INVPCID, &enable_invpcid);
                        if (error == 0 && enable_invpcid)
                                regs[1] |= CPUID_STDEXT_INVPCID;
                }
                break;

        case CPUID_0000_0006:
                regs[0] = CPUTPM1_ARAT;
                regs[1] = 0;
                regs[2] = 0;
                regs[3] = 0;
                break;

        case CPUID_0000_000A:
                /*
                 * Handle the access, but report 0 for
                 * all options
                 */
                regs[0] = 0;
                regs[1] = 0;
                regs[2] = 0;
                regs[3] = 0;
                break;

        case CPUID_0000_000B:
                /*
                 * Intel processor topology enumeration
                 */
                if (vmm_is_intel()) {
                        vm_get_topology(vm, &sockets, &cores, &threads,
                            &maxcpus);
                        if (param == 0) {
                                logical_cpus = threads;
                                width = log2(logical_cpus);
                                level = CPUID_TYPE_SMT;
                                x2apic_id = vcpu_id;
                        }

                        if (param == 1) {
                                logical_cpus = threads * cores;
                                width = log2(logical_cpus);
                                level = CPUID_TYPE_CORE;
                                x2apic_id = vcpu_id;
                        }

                        if (!cpuid_leaf_b || param >= 2) {
                                width = 0;
                                logical_cpus = 0;
                                level = 0;
                                x2apic_id = 0;
                        }

                        regs[0] = width & 0x1f;
                        regs[1] = logical_cpus & 0xffff;
                        regs[2] = (level << 8) | (param & 0xff);
                        regs[3] = x2apic_id;
                } else {
                        regs[0] = 0;
                        regs[1] = 0;
                        regs[2] = 0;
                        regs[3] = 0;
                }
                break;

        case CPUID_0000_000D:
                limits = vmm_get_xsave_limits();
                if (!limits->xsave_enabled) {
                        regs[0] = 0;
                        regs[1] = 0;
                        regs[2] = 0;
                        regs[3] = 0;
                        break;
                }

                cpuid_count(func, param, regs);
                switch (param) {
                case 0:
                        /*
                         * Only permit the guest to use bits
                         * that are active in the host in
                         * %xcr0.  Also, claim that the
                         * maximum save area size is
                         * equivalent to the host's current
                         * save area size.  Since this runs
                         * "inside" of vmrun(), it runs with
                         * the guest's xcr0, so the current
                         * save area size is correct as-is.
                         */
                        regs[0] &= limits->xcr0_allowed;
                        regs[2] = limits->xsave_max_size;
                        regs[3] &= (limits->xcr0_allowed >> 32);
                        break;
                case 1:
                        /* Only permit XSAVEOPT. */
                        regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
                        regs[1] = 0;
                        regs[2] = 0;
                        regs[3] = 0;
                        break;
                default:
                        /*
                         * If the leaf is for a permitted feature,
                         * pass through as-is, otherwise return
                         * all zeroes.
                         */
                        if (!(limits->xcr0_allowed & (1ul << param))) {
                                regs[0] = 0;
                                regs[1] = 0;
                                regs[2] = 0;
                                regs[3] = 0;
                        }
                        break;
                }
                break;

        case CPUID_0000_000F:
        case CPUID_0000_0010:
                /*
                 * Do not report any Resource Director Technology
                 * capabilities.  Exposing control of cache or memory
                 * controller resource partitioning to the guest is not
                 * at all sensible.
                 *
                 * This is already hidden at a high level by masking of
                 * leaf 0x7.  Even still, a guest may look here for
                 * detailed capability information.
                 */
                regs[0] = 0;
                regs[1] = 0;
                regs[2] = 0;
                regs[3] = 0;
                break;

        case CPUID_0000_0015:
                /*
                 * Don't report CPU TSC/Crystal ratio and clock
                 * values since guests may use these to derive the
                 * local APIC frequency.
                 */
                regs[0] = 0;
                regs[1] = 0;
                regs[2] = 0;
                regs[3] = 0;
                break;

        case 0x40000000:
                regs[0] = CPUID_VM_HIGH;
                bcopy(bhyve_id, &regs[1], 4);
                bcopy(bhyve_id + 4, &regs[2], 4);
                bcopy(bhyve_id + 8, &regs[3], 4);
                break;

        default:
default_leaf:
                /*
                 * The leaf value has already been clamped so
                 * simply pass this through, keeping count of
                 * how many unhandled leaf values have been seen.
                 */
                atomic_add_long(&bhyve_xcpuids, 1);
                cpuid_count(func, param, regs);
                break;
        }

        /*
         * CPUID clears the upper 32-bits of the long-mode registers.
         */
        *rax = regs[0];
        *rbx = regs[1];
        *rcx = regs[2];
        *rdx = regs[3];

        return (1);
}

bool
vm_cpuid_capability(struct vcpu *vcpu, enum vm_cpuid_capability cap)
{
        bool rv;

        KASSERT(cap > 0 && cap < VCC_LAST, ("%s: invalid vm_cpu_capability %d",
            __func__, cap));

        /*
         * Simply passthrough the capabilities of the host cpu for now.
         */
        rv = false;
        switch (cap) {
        case VCC_NO_EXECUTE:
                if (amd_feature & AMDID_NX)
                        rv = true;
                break;
        case VCC_FFXSR:
                if (amd_feature & AMDID_FFXSR)
                        rv = true;
                break;
        case VCC_TCE:
                if (amd_feature2 & AMDID2_TCE)
                        rv = true;
                break;
        default:
                panic("%s: unknown vm_cpu_capability %d", __func__, cap);
        }
        return (rv);
}

int
vm_rdmtrr(struct vm_mtrr *mtrr, u_int num, uint64_t *val)
{
        switch (num) {
        case MSR_MTRRcap:
                *val = MTRR_CAP_WC | MTRR_CAP_FIXED | VMM_MTRR_VAR_MAX;
                break;
        case MSR_MTRRdefType:
                *val = mtrr->def_type;
                break;
        case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
                *val = mtrr->fixed4k[num - MSR_MTRR4kBase];
                break;
        case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
                *val = mtrr->fixed16k[num - MSR_MTRR16kBase];
                break;
        case MSR_MTRR64kBase:
                *val = mtrr->fixed64k;
                break;
        case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: {
                u_int offset = num - MSR_MTRRVarBase;
                if (offset % 2 == 0) {
                        *val = mtrr->var[offset / 2].base;
                } else {
                        *val = mtrr->var[offset / 2].mask;
                }
                break;
        }
        default:
                return (-1);
        }

        return (0);
}

int
vm_wrmtrr(struct vm_mtrr *mtrr, u_int num, uint64_t val)
{
        switch (num) {
        case MSR_MTRRcap:
                /* MTRRCAP is read only */
                return (-1);
        case MSR_MTRRdefType:
                if (val & ~VMM_MTRR_DEF_MASK) {
                        /* generate #GP on writes to reserved fields */
                        return (-1);
                }
                mtrr->def_type = val;
                break;
        case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
                mtrr->fixed4k[num - MSR_MTRR4kBase] = val;
                break;
        case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
                mtrr->fixed16k[num - MSR_MTRR16kBase] = val;
                break;
        case MSR_MTRR64kBase:
                mtrr->fixed64k = val;
                break;
        case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: {
                u_int offset = num - MSR_MTRRVarBase;
                if (offset % 2 == 0) {
                        if (val & ~VMM_MTRR_PHYSBASE_MASK) {
                                /* generate #GP on writes to reserved fields */
                                return (-1);
                        }
                        mtrr->var[offset / 2].base = val;
                } else {
                        if (val & ~VMM_MTRR_PHYSMASK_MASK) {
                                /* generate #GP on writes to reserved fields */
                                return (-1);
                        }
                        mtrr->var[offset / 2].mask = val;
                }
                break;
        }
        default:
                return (-1);
        }

        return (0);
}