/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include <machine/vmm.h>

#include "vmm_host.h"
#include "vmm_ktr.h"
#include "vmm_util.h"
#include "x86.h"

SYSCTL_DECL(_hw_vmm);
static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    NULL);

#define	CPUID_VM_HIGH		0x40000000

static const char bhyve_id[12] = "bhyve bhyve ";

static uint64_t bhyve_xcpuids;
SYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0,
    "Number of times an unknown cpuid leaf was accessed");

static int cpuid_leaf_b = 1;
SYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
    &cpuid_leaf_b, 0, NULL);

/*
 * Round up to the next power of two, if necessary, and then take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
log2(u_int x)
{

	return (fls(x << (1 - powerof2(x))) - 1);
}
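/*
 * Worked examples of the expression above: log2(1) == 0, log2(2) == 1,
 * log2(3) == 2 (3 rounds up to 4), log2(8) == 3.  log2(0) == -1 because
 * powerof2(0) is true, so the shift is zero, and fls(0) is 0.
 */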
int
x86_emulate_cpuid(struct vcpu *vcpu, uint64_t *rax, uint64_t *rbx,
    uint64_t *rcx, uint64_t *rdx)
{
	struct vm *vm = vcpu_vm(vcpu);
	int vcpu_id = vcpu_vcpuid(vcpu);
	const struct xsave_limits *limits;
	uint64_t cr4;
	int error, enable_invpcid, enable_rdpid, enable_rdtscp, level,
	    width, x2apic_id;
	unsigned int func, regs[4], logical_cpus, param;
	enum x2apic_state x2apic_state;
	uint16_t cores, maxcpus, sockets, threads;

	/*
	 * The function of CPUID is controlled through the provided value of
	 * %eax (and secondarily %ecx, for certain leaf data).
	 */
	func = (uint32_t)*rax;
	param = (uint32_t)*rcx;

	VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", func, param);

	/*
	 * Requests for invalid CPUID levels should map to the highest
	 * available level instead.
	 */
	if (cpu_exthigh != 0 && func >= 0x80000000) {
		if (func > cpu_exthigh)
			func = cpu_exthigh;
	} else if (func >= 0x40000000) {
		if (func > CPUID_VM_HIGH)
			func = CPUID_VM_HIGH;
	} else if (func > cpu_high) {
		func = cpu_high;
	}

	/*
	 * In general the approach used for CPU topology is to
	 * advertise a flat topology where all CPUs are packages with
	 * no multi-core or SMT.
	 */
	switch (func) {
	/*
	 * Pass these through to the guest
	 */
	case CPUID_0000_0000:
	case CPUID_0000_0002:
	case CPUID_0000_0003:
	case CPUID_8000_0000:
	case CPUID_8000_0002:
	case CPUID_8000_0003:
	case CPUID_8000_0004:
	case CPUID_8000_0006:
		cpuid_count(func, param, regs);
		break;
	case CPUID_8000_0008:
		cpuid_count(func, param, regs);
		if (vmm_is_svm()) {
			/*
			 * As on Intel (0000_0007:0, EDX), mask out
			 * unsupported or unsafe AMD extended features
			 * (8000_0008 EBX).
			 */
			regs[1] &= (AMDFEID_CLZERO | AMDFEID_IRPERF |
			    AMDFEID_XSAVEERPTR);

			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			/*
			 * Here, width is ApicIdCoreIdSize, present on
			 * at least Family 15h and newer.  It
			 * represents the "number of bits in the
			 * initial apicid that indicate thread id
			 * within a package."
			 *
			 * Our topo_probe_amd() uses it for
			 * pkg_id_shift and other OSes may rely on it.
			 */
			width = MIN(0xF, log2(threads * cores));
			if (width < 0x4)
				width = 0;
			logical_cpus = MIN(0xFF, threads * cores - 1);
			regs[2] = (width << AMDID_COREID_SIZE_SHIFT) |
			    logical_cpus;
		}
		break;

	case CPUID_8000_0001:
		cpuid_count(func, param, regs);

		/*
		 * Hide SVM from guest.
		 */
		regs[2] &= ~AMDID2_SVM;

		/*
		 * Don't advertise extended performance counter MSRs
		 * to the guest.
		 */
		regs[2] &= ~AMDID2_PCXC;
		regs[2] &= ~AMDID2_PNXC;
		regs[2] &= ~AMDID2_PTSCEL2I;

		/*
		 * Don't advertise Instruction Based Sampling feature.
		 */
		regs[2] &= ~AMDID2_IBS;

		/* NodeID MSR not available */
		regs[2] &= ~AMDID2_NODE_ID;

		/* Don't advertise the OS visible workaround feature */
		regs[2] &= ~AMDID2_OSVW;

		/* Hide mwaitx/monitorx capability from the guest */
		regs[2] &= ~AMDID2_MWAITX;

		/* Advertise RDTSCP if it is enabled. */
		error = vm_get_capability(vcpu,
		    VM_CAP_RDTSCP, &enable_rdtscp);
		if (error == 0 && enable_rdtscp)
			regs[3] |= AMDID_RDTSCP;
		else
			regs[3] &= ~AMDID_RDTSCP;
		break;

	case CPUID_8000_0007:
		/*
		 * AMD uses this leaf to advertise the processor's
		 * power monitoring and RAS capabilities. These
		 * features are hardware-specific and exposing
		 * them to a guest doesn't make a lot of sense.
		 *
		 * Intel uses this leaf only to advertise the
		 * "Invariant TSC" feature with all other bits
		 * being reserved (set to zero).
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/*
		 * "Invariant TSC" can be advertised to the guest if:
		 * - host TSC frequency is invariant
		 * - host TSCs are synchronized across physical cpus
		 *
		 * XXX This still falls short because the vcpu
		 * can observe the TSC moving backwards as it
		 * migrates across physical cpus. But at least
		 * it should discourage the guest from using the
		 * TSC to keep track of time.
		 */
		if (tsc_is_invariant && smp_tsc)
			regs[3] |= AMDPM_TSC_INVARIANT;
		break;
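	/*
	 * For the bit-shifting in the case below, the Fn8000_001D EAX
	 * layout (per AMD's documentation) is: bits 4:0 cache type,
	 * bits 7:5 cache level, bit 8 self-initializing flag, and bits
	 * 25:14 the number of logical CPUs sharing the cache, minus one.
	 */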
	case CPUID_8000_001D:
		/* AMD Cache topology, like 0000_0004 for Intel. */
		if (!vmm_is_svm())
			goto default_leaf;

		/*
		 * Similar to Intel, generate a fictitious cache
		 * topology for the guest with L3 shared by the
		 * package, and L1 and L2 local to a core.
		 */
		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		switch (param) {
		case 0:
			logical_cpus = threads;
			level = 1;
			func = 1;	/* data cache */
			break;
		case 1:
			logical_cpus = threads;
			level = 2;
			func = 3;	/* unified cache */
			break;
		case 2:
			logical_cpus = threads * cores;
			level = 3;
			func = 3;	/* unified cache */
			break;
		default:
			logical_cpus = 0;
			level = 0;
			func = 0;
			break;
		}

		logical_cpus = MIN(0xfff, logical_cpus - 1);
		regs[0] = (logical_cpus << 14) | (1 << 8) |
		    (level << 5) | func;
		regs[1] = (func > 0) ? (CACHE_LINE_SIZE - 1) : 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_8000_001E:
		/*
		 * AMD Family 16h+ and Hygon Family 18h additional
		 * identifiers.
		 */
		if (!vmm_is_svm() || CPUID_TO_FAMILY(cpu_id) < 0x16)
			goto default_leaf;

		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		regs[0] = vcpu_id;
		threads = MIN(0xFF, threads - 1);
		regs[1] = (threads << 8) |
		    (vcpu_id >> log2(threads + 1));
		/*
		 * XXX Bhyve topology cannot yet represent >1 node per
		 * processor.
		 */
		regs[2] = 0;
		regs[3] = 0;
		break;
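	/*
	 * Leaf 1 EBX layout, relied on below: bits 31:24 hold the
	 * initial APIC ID and bits 23:16 the number of logical
	 * processors per package (meaningful when CPUID_HTT is set
	 * in EDX).
	 */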
	case CPUID_0000_0001:
		do_cpuid(1, regs);

		error = vm_get_x2apic_state(vcpu, &x2apic_state);
		if (error) {
			panic("x86_emulate_cpuid: error %d "
			    "fetching x2apic state", error);
		}

		/*
		 * Override the APIC ID only in ebx
		 */
		regs[1] &= ~(CPUID_LOCAL_APIC_ID);
		regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);

		/*
		 * Don't expose VMX, SpeedStep, TME or SMX capability.
		 * Advertise x2APIC capability and Hypervisor guest.
		 */
		regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
		regs[2] &= ~(CPUID2_SMX);

		regs[2] |= CPUID2_HV;

		if (x2apic_state != X2APIC_DISABLED)
			regs[2] |= CPUID2_X2APIC;
		else
			regs[2] &= ~CPUID2_X2APIC;

		/*
		 * Only advertise CPUID2_XSAVE in the guest if
		 * the host is using XSAVE.
		 */
		if (!(regs[2] & CPUID2_OSXSAVE))
			regs[2] &= ~CPUID2_XSAVE;

		/*
		 * If CPUID2_XSAVE is being advertised and the
		 * guest has set CR4_XSAVE, set
		 * CPUID2_OSXSAVE.
		 */
		regs[2] &= ~CPUID2_OSXSAVE;
		if (regs[2] & CPUID2_XSAVE) {
			error = vm_get_register(vcpu,
			    VM_REG_GUEST_CR4, &cr4);
			if (error)
				panic("x86_emulate_cpuid: error %d "
				    "fetching %%cr4", error);
			if (cr4 & CR4_XSAVE)
				regs[2] |= CPUID2_OSXSAVE;
		}

		/*
		 * Hide monitor/mwait until we know how to deal with
		 * these instructions.
		 */
		regs[2] &= ~CPUID2_MON;

		/*
		 * Hide the performance and debug features.
		 */
		regs[2] &= ~CPUID2_PDCM;

		/*
		 * No TSC deadline support in the APIC yet
		 */
		regs[2] &= ~CPUID2_TSCDLT;

		/*
		 * Hide thermal monitoring
		 */
		regs[3] &= ~(CPUID_ACPI | CPUID_TM);

		/*
		 * Hide the debug store capability.
		 */
		regs[3] &= ~CPUID_DS;

		/*
		 * Advertise the Machine Check and MTRR capability.
		 *
		 * Some guest OSes (e.g. Windows) will not boot if
		 * these features are absent.
		 */
		regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);

		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		logical_cpus = threads * cores;
		regs[1] &= ~CPUID_HTT_CORES;
		regs[1] |= (logical_cpus & 0xff) << 16;
		regs[3] |= CPUID_HTT;
		break;
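	/*
	 * Leaf 4 EAX layout, for the shifts used below: bits 31:26
	 * encode the number of cores per package minus one, bits 25:14
	 * the maximum number of logical processors sharing this cache
	 * minus one, and bits 7:5 the cache level.
	 */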
	case CPUID_0000_0004:
		cpuid_count(func, param, regs);

		if (regs[0] || regs[1] || regs[2] || regs[3]) {
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			regs[0] &= 0x3ff;
			regs[0] |= (cores - 1) << 26;
			/*
			 * Cache topology:
			 * - L1 and L2 are shared only by the logical
			 *   processors in a single core.
			 * - L3 and above are shared by all logical
			 *   processors in the package.
			 */
			logical_cpus = threads;
			level = (regs[0] >> 5) & 0x7;
			if (level >= 3)
				logical_cpus *= cores;
			regs[0] |= (logical_cpus - 1) << 14;
		}
		break;

	case CPUID_0000_0007:
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/* leaf 0 */
		if (param == 0) {
			cpuid_count(func, param, regs);

			/* Only leaf 0 is supported */
			regs[0] = 0;

			/*
			 * Expose known-safe features.
			 */
			regs[1] &= CPUID_STDEXT_FSGSBASE |
			    CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
			    CPUID_STDEXT_AVX2 | CPUID_STDEXT_SMEP |
			    CPUID_STDEXT_BMI2 |
			    CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
			    CPUID_STDEXT_AVX512F |
			    CPUID_STDEXT_AVX512DQ |
			    CPUID_STDEXT_RDSEED |
			    CPUID_STDEXT_SMAP |
			    CPUID_STDEXT_AVX512PF |
			    CPUID_STDEXT_AVX512ER |
			    CPUID_STDEXT_AVX512CD | CPUID_STDEXT_SHA |
			    CPUID_STDEXT_AVX512BW |
			    CPUID_STDEXT_AVX512VL;
			regs[2] &= CPUID_STDEXT2_VAES |
			    CPUID_STDEXT2_VPCLMULQDQ;
			regs[3] &= CPUID_STDEXT3_MD_CLEAR;

			/* Advertise RDPID if it is enabled. */
			error = vm_get_capability(vcpu, VM_CAP_RDPID,
			    &enable_rdpid);
			if (error == 0 && enable_rdpid)
				regs[2] |= CPUID_STDEXT2_RDPID;

			/* Advertise INVPCID if it is enabled. */
			error = vm_get_capability(vcpu,
			    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
			if (error == 0 && enable_invpcid)
				regs[1] |= CPUID_STDEXT_INVPCID;
		}
		break;

	case CPUID_0000_0006:
		regs[0] = CPUTPM1_ARAT;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000A:
		/*
		 * Handle the access, but report 0 for
		 * all options
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000B:
		/*
		 * Intel processor topology enumeration
		 */
		if (vmm_is_intel()) {
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			if (param == 0) {
				logical_cpus = threads;
				width = log2(logical_cpus);
				level = CPUID_TYPE_SMT;
				x2apic_id = vcpu_id;
			}

			if (param == 1) {
				logical_cpus = threads * cores;
				width = log2(logical_cpus);
				level = CPUID_TYPE_CORE;
				x2apic_id = vcpu_id;
			}

			if (!cpuid_leaf_b || param >= 2) {
				width = 0;
				logical_cpus = 0;
				level = 0;
				x2apic_id = 0;
			}

			regs[0] = width & 0x1f;
			regs[1] = logical_cpus & 0xffff;
			regs[2] = (level << 8) | (param & 0xff);
			regs[3] = x2apic_id;
		} else {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
		}
		break;

	case CPUID_0000_000D:
		limits = vmm_get_xsave_limits();
		if (!limits->xsave_enabled) {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		}

		cpuid_count(func, param, regs);
		switch (param) {
		case 0:
			/*
			 * Only permit the guest to use bits
			 * that are active in the host in
			 * %xcr0.  Also, claim that the
			 * maximum save area size is
			 * equivalent to the host's current
			 * save area size.  Since this runs
			 * "inside" of vmrun(), it runs with
			 * the guest's xcr0, so the current
			 * save area size is correct as-is.
			 */
			regs[0] &= limits->xcr0_allowed;
			regs[2] = limits->xsave_max_size;
			regs[3] &= (limits->xcr0_allowed >> 32);
			break;
		case 1:
			/* Only permit XSAVEOPT. */
			regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		default:
			/*
			 * If the leaf is for a permitted feature,
			 * pass through as-is, otherwise return
			 * all zeroes.
			 */
			if (!(limits->xcr0_allowed & (1ul << param))) {
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
			}
			break;
		}
		break;

	case CPUID_0000_000F:
	case CPUID_0000_0010:
		/*
		 * Do not report any Resource Director Technology
		 * capabilities.  Exposing control of cache or memory
		 * controller resource partitioning to the guest is not
		 * at all sensible.
		 *
		 * This is already hidden at a high level by masking of
		 * leaf 0x7.  Even still, a guest may look here for
		 * detailed capability information.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_0015:
		/*
		 * Don't report CPU TSC/Crystal ratio and clock
		 * values since guests may use these to derive the
		 * local APIC frequency.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;
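	/*
	 * 0x40000000 is the conventional base leaf for hypervisor
	 * identification: EAX returns the highest hypervisor leaf and
	 * EBX/ECX/EDX return a 12-byte vendor signature, here the
	 * "bhyve bhyve " string copied 4 bytes at a time.
	 */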
598 */ 599 regs[0] = 0; 600 regs[1] = 0; 601 regs[2] = 0; 602 regs[3] = 0; 603 break; 604 605 case 0x40000000: 606 regs[0] = CPUID_VM_HIGH; 607 bcopy(bhyve_id, ®s[1], 4); 608 bcopy(bhyve_id + 4, ®s[2], 4); 609 bcopy(bhyve_id + 8, ®s[3], 4); 610 break; 611 612 default: 613 default_leaf: 614 /* 615 * The leaf value has already been clamped so 616 * simply pass this through, keeping count of 617 * how many unhandled leaf values have been seen. 618 */ 619 atomic_add_long(&bhyve_xcpuids, 1); 620 cpuid_count(func, param, regs); 621 break; 622 } 623 624 /* 625 * CPUID clears the upper 32-bits of the long-mode registers. 626 */ 627 *rax = regs[0]; 628 *rbx = regs[1]; 629 *rcx = regs[2]; 630 *rdx = regs[3]; 631 632 return (1); 633 } 634 635 bool 636 vm_cpuid_capability(struct vcpu *vcpu, enum vm_cpuid_capability cap) 637 { 638 bool rv; 639 640 KASSERT(cap > 0 && cap < VCC_LAST, ("%s: invalid vm_cpu_capability %d", 641 __func__, cap)); 642 643 /* 644 * Simply passthrough the capabilities of the host cpu for now. 645 */ 646 rv = false; 647 switch (cap) { 648 case VCC_NO_EXECUTE: 649 if (amd_feature & AMDID_NX) 650 rv = true; 651 break; 652 case VCC_FFXSR: 653 if (amd_feature & AMDID_FFXSR) 654 rv = true; 655 break; 656 case VCC_TCE: 657 if (amd_feature2 & AMDID2_TCE) 658 rv = true; 659 break; 660 default: 661 panic("%s: unknown vm_cpu_capability %d", __func__, cap); 662 } 663 return (rv); 664 } 665 666 int 667 vm_rdmtrr(struct vm_mtrr *mtrr, u_int num, uint64_t *val) 668 { 669 switch (num) { 670 case MSR_MTRRcap: 671 *val = MTRR_CAP_WC | MTRR_CAP_FIXED | VMM_MTRR_VAR_MAX; 672 break; 673 case MSR_MTRRdefType: 674 *val = mtrr->def_type; 675 break; 676 case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7: 677 *val = mtrr->fixed4k[num - MSR_MTRR4kBase]; 678 break; 679 case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1: 680 *val = mtrr->fixed16k[num - MSR_MTRR16kBase]; 681 break; 682 case MSR_MTRR64kBase: 683 *val = mtrr->fixed64k; 684 break; 685 case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: { 686 u_int offset = num - MSR_MTRRVarBase; 687 if (offset % 2 == 0) { 688 *val = mtrr->var[offset / 2].base; 689 } else { 690 *val = mtrr->var[offset / 2].mask; 691 } 692 break; 693 } 694 default: 695 return (-1); 696 } 697 698 return (0); 699 } 700 701 int 702 vm_wrmtrr(struct vm_mtrr *mtrr, u_int num, uint64_t val) 703 { 704 switch (num) { 705 case MSR_MTRRcap: 706 /* MTRRCAP is read only */ 707 return (-1); 708 case MSR_MTRRdefType: 709 if (val & ~VMM_MTRR_DEF_MASK) { 710 /* generate #GP on writes to reserved fields */ 711 return (-1); 712 } 713 mtrr->def_type = val; 714 break; 715 case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7: 716 mtrr->fixed4k[num - MSR_MTRR4kBase] = val; 717 break; 718 case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1: 719 mtrr->fixed16k[num - MSR_MTRR16kBase] = val; 720 break; 721 case MSR_MTRR64kBase: 722 mtrr->fixed64k = val; 723 break; 724 case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: { 725 u_int offset = num - MSR_MTRRVarBase; 726 if (offset % 2 == 0) { 727 if (val & ~VMM_MTRR_PHYSBASE_MASK) { 728 /* generate #GP on writes to reserved fields */ 729 return (-1); 730 } 731 mtrr->var[offset / 2].base = val; 732 } else { 733 if (val & ~VMM_MTRR_PHYSMASK_MASK) { 734 /* generate #GP on writes to reserved fields */ 735 return (-1); 736 } 737 mtrr->var[offset / 2].mask = val; 738 } 739 break; 740 } 741 default: 742 return (-1); 743 } 744 745 return (0); 746 } 747