/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include <machine/vmm.h>

#include "vmm_host.h"
#include "vmm_ktr.h"
#include "vmm_util.h"
#include "x86.h"

SYSCTL_DECL(_hw_vmm);
static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    NULL);

#define	CPUID_VM_HIGH		0x40000000

static const char bhyve_id[12] = "bhyve bhyve ";

static uint64_t bhyve_xcpuids;
SYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0,
    "Number of times an unknown cpuid leaf was accessed");

#if __FreeBSD_version < 1200060	/* Remove after 11 EOL helps MFCing */
extern u_int threads_per_core;
SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, threads_per_core, CTLFLAG_RDTUN,
    &threads_per_core, 0, NULL);

extern u_int cores_per_package;
SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, cores_per_package, CTLFLAG_RDTUN,
    &cores_per_package, 0, NULL);
#endif

static int cpuid_leaf_b = 1;
SYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
    &cpuid_leaf_b, 0, NULL);

/*
 * Round up to the next power of two, if necessary, and then take log2.
 * Returns -1 if argument is zero.
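 * For example: log2(1) == 0, log2(4) == 2 and log2(6) == 3, since 6 is
 * first rounded up to 8.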
81 */ 82 static __inline int 83 log2(u_int x) 84 { 85 86 return (fls(x << (1 - powerof2(x))) - 1); 87 } 88 89 int 90 x86_emulate_cpuid(struct vcpu *vcpu, uint64_t *rax, uint64_t *rbx, 91 uint64_t *rcx, uint64_t *rdx) 92 { 93 struct vm *vm = vcpu_vm(vcpu); 94 int vcpu_id = vcpu_vcpuid(vcpu); 95 const struct xsave_limits *limits; 96 uint64_t cr4; 97 int error, enable_invpcid, enable_rdpid, enable_rdtscp, level, 98 width, x2apic_id; 99 unsigned int func, regs[4], logical_cpus, param; 100 enum x2apic_state x2apic_state; 101 uint16_t cores, maxcpus, sockets, threads; 102 103 /* 104 * The function of CPUID is controlled through the provided value of 105 * %eax (and secondarily %ecx, for certain leaf data). 106 */ 107 func = (uint32_t)*rax; 108 param = (uint32_t)*rcx; 109 110 VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", func, param); 111 112 /* 113 * Requests for invalid CPUID levels should map to the highest 114 * available level instead. 115 */ 116 if (cpu_exthigh != 0 && func >= 0x80000000) { 117 if (func > cpu_exthigh) 118 func = cpu_exthigh; 119 } else if (func >= 0x40000000) { 120 if (func > CPUID_VM_HIGH) 121 func = CPUID_VM_HIGH; 122 } else if (func > cpu_high) { 123 func = cpu_high; 124 } 125 126 /* 127 * In general the approach used for CPU topology is to 128 * advertise a flat topology where all CPUs are packages with 129 * no multi-core or SMT. 130 */ 131 switch (func) { 132 /* 133 * Pass these through to the guest 134 */ 135 case CPUID_0000_0000: 136 case CPUID_0000_0002: 137 case CPUID_0000_0003: 138 case CPUID_8000_0000: 139 case CPUID_8000_0002: 140 case CPUID_8000_0003: 141 case CPUID_8000_0004: 142 case CPUID_8000_0006: 143 cpuid_count(func, param, regs); 144 break; 145 case CPUID_8000_0008: 146 cpuid_count(func, param, regs); 147 if (vmm_is_svm()) { 148 /* 149 * As on Intel (0000_0007:0, EDX), mask out 150 * unsupported or unsafe AMD extended features 151 * (8000_0008 EBX). 152 */ 153 regs[1] &= (AMDFEID_CLZERO | AMDFEID_IRPERF | 154 AMDFEID_XSAVEERPTR); 155 156 vm_get_topology(vm, &sockets, &cores, &threads, 157 &maxcpus); 158 /* 159 * Here, width is ApicIdCoreIdSize, present on 160 * at least Family 15h and newer. It 161 * represents the "number of bits in the 162 * initial apicid that indicate thread id 163 * within a package." 164 * 165 * Our topo_probe_amd() uses it for 166 * pkg_id_shift and other OSes may rely on it. 167 */ 168 width = MIN(0xF, log2(threads * cores)); 169 if (width < 0x4) 170 width = 0; 171 logical_cpus = MIN(0xFF, threads * cores - 1); 172 regs[2] = (width << AMDID_COREID_SIZE_SHIFT) | logical_cpus; 173 } 174 break; 175 176 case CPUID_8000_0001: 177 cpuid_count(func, param, regs); 178 179 /* 180 * Hide SVM from guest. 181 */ 182 regs[2] &= ~AMDID2_SVM; 183 184 /* 185 * Don't advertise extended performance counter MSRs 186 * to the guest. 187 */ 188 regs[2] &= ~AMDID2_PCXC; 189 regs[2] &= ~AMDID2_PNXC; 190 regs[2] &= ~AMDID2_PTSCEL2I; 191 192 /* 193 * Don't advertise Instruction Based Sampling feature. 194 */ 195 regs[2] &= ~AMDID2_IBS; 196 197 /* NodeID MSR not available */ 198 regs[2] &= ~AMDID2_NODE_ID; 199 200 /* Don't advertise the OS visible workaround feature */ 201 regs[2] &= ~AMDID2_OSVW; 202 203 /* Hide mwaitx/monitorx capability from the guest */ 204 regs[2] &= ~AMDID2_MWAITX; 205 206 /* Advertise RDTSCP if it is enabled. 
		 */
		error = vm_get_capability(vcpu,
		    VM_CAP_RDTSCP, &enable_rdtscp);
		if (error == 0 && enable_rdtscp)
			regs[3] |= AMDID_RDTSCP;
		else
			regs[3] &= ~AMDID_RDTSCP;
		break;

	case CPUID_8000_0007:
		/*
		 * AMD uses this leaf to advertise the processor's
		 * power monitoring and RAS capabilities. These
		 * features are hardware-specific and exposing
		 * them to a guest doesn't make a lot of sense.
		 *
		 * Intel uses this leaf only to advertise the
		 * "Invariant TSC" feature with all other bits
		 * being reserved (set to zero).
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/*
		 * "Invariant TSC" can be advertised to the guest if:
		 * - host TSC frequency is invariant
		 * - host TSCs are synchronized across physical cpus
		 *
		 * XXX This still falls short because the vcpu
		 * can observe the TSC moving backwards as it
		 * migrates across physical cpus. But at least
		 * it should discourage the guest from using the
		 * TSC to keep track of time.
		 */
		if (tsc_is_invariant && smp_tsc)
			regs[3] |= AMDPM_TSC_INVARIANT;
		break;

	case CPUID_8000_001D:
		/* AMD Cache topology, like 0000_0004 for Intel. */
		if (!vmm_is_svm())
			goto default_leaf;

		/*
		 * Similar to Intel, generate a fictitious cache
		 * topology for the guest with L3 shared by the
		 * package, and L1 and L2 local to a core.
		 */
		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		switch (param) {
		case 0:
			logical_cpus = threads;
			level = 1;
			func = 1;	/* data cache */
			break;
		case 1:
			logical_cpus = threads;
			level = 2;
			func = 3;	/* unified cache */
			break;
		case 2:
			logical_cpus = threads * cores;
			level = 3;
			func = 3;	/* unified cache */
			break;
		default:
			logical_cpus = 0;
			level = 0;
			func = 0;
			break;
		}

		logical_cpus = MIN(0xfff, logical_cpus - 1);
		regs[0] = (logical_cpus << 14) | (1 << 8) |
		    (level << 5) | func;
		regs[1] = (func > 0) ? (CACHE_LINE_SIZE - 1) : 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_8000_001E:
		/*
		 * AMD Family 16h+ and Hygon Family 18h additional
		 * identifiers.
		 */
		if (!vmm_is_svm() || CPUID_TO_FAMILY(cpu_id) < 0x16)
			goto default_leaf;

		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		regs[0] = vcpu_id;
		threads = MIN(0xFF, threads - 1);
		regs[1] = (threads << 8) |
		    (vcpu_id >> log2(threads + 1));
		/*
		 * XXX Bhyve topology cannot yet represent >1 node per
		 * processor.
		 */
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_0001:
		do_cpuid(1, regs);

		error = vm_get_x2apic_state(vcpu, &x2apic_state);
		if (error) {
			panic("x86_emulate_cpuid: error %d "
			    "fetching x2apic state", error);
		}

		/*
		 * Override the APIC ID only in ebx
		 */
		regs[1] &= ~(CPUID_LOCAL_APIC_ID);
		regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);

		/*
		 * Don't expose VMX, SpeedStep, TME or SMX capability.
		 * Advertise x2APIC capability and Hypervisor guest.
		 */
		regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
		regs[2] &= ~(CPUID2_SMX);

		regs[2] |= CPUID2_HV;

		if (x2apic_state != X2APIC_DISABLED)
			regs[2] |= CPUID2_X2APIC;
		else
			regs[2] &= ~CPUID2_X2APIC;

		/*
		 * Only advertise CPUID2_XSAVE in the guest if
		 * the host is using XSAVE.
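		 * (The host's CPUID2_OSXSAVE bit mirrors the host's
		 * CR4.OSXSAVE setting.)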
343 */ 344 if (!(regs[2] & CPUID2_OSXSAVE)) 345 regs[2] &= ~CPUID2_XSAVE; 346 347 /* 348 * If CPUID2_XSAVE is being advertised and the 349 * guest has set CR4_XSAVE, set 350 * CPUID2_OSXSAVE. 351 */ 352 regs[2] &= ~CPUID2_OSXSAVE; 353 if (regs[2] & CPUID2_XSAVE) { 354 error = vm_get_register(vcpu, 355 VM_REG_GUEST_CR4, &cr4); 356 if (error) 357 panic("x86_emulate_cpuid: error %d " 358 "fetching %%cr4", error); 359 if (cr4 & CR4_XSAVE) 360 regs[2] |= CPUID2_OSXSAVE; 361 } 362 363 /* 364 * Hide monitor/mwait until we know how to deal with 365 * these instructions. 366 */ 367 regs[2] &= ~CPUID2_MON; 368 369 /* 370 * Hide the performance and debug features. 371 */ 372 regs[2] &= ~CPUID2_PDCM; 373 374 /* 375 * No TSC deadline support in the APIC yet 376 */ 377 regs[2] &= ~CPUID2_TSCDLT; 378 379 /* 380 * Hide thermal monitoring 381 */ 382 regs[3] &= ~(CPUID_ACPI | CPUID_TM); 383 384 /* 385 * Hide the debug store capability. 386 */ 387 regs[3] &= ~CPUID_DS; 388 389 /* 390 * Advertise the Machine Check and MTRR capability. 391 * 392 * Some guest OSes (e.g. Windows) will not boot if 393 * these features are absent. 394 */ 395 regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR); 396 397 vm_get_topology(vm, &sockets, &cores, &threads, 398 &maxcpus); 399 logical_cpus = threads * cores; 400 regs[1] &= ~CPUID_HTT_CORES; 401 regs[1] |= (logical_cpus & 0xff) << 16; 402 regs[3] |= CPUID_HTT; 403 break; 404 405 case CPUID_0000_0004: 406 cpuid_count(func, param, regs); 407 408 if (regs[0] || regs[1] || regs[2] || regs[3]) { 409 vm_get_topology(vm, &sockets, &cores, &threads, 410 &maxcpus); 411 regs[0] &= 0x3ff; 412 regs[0] |= (cores - 1) << 26; 413 /* 414 * Cache topology: 415 * - L1 and L2 are shared only by the logical 416 * processors in a single core. 417 * - L3 and above are shared by all logical 418 * processors in the package. 419 */ 420 logical_cpus = threads; 421 level = (regs[0] >> 5) & 0x7; 422 if (level >= 3) 423 logical_cpus *= cores; 424 regs[0] |= (logical_cpus - 1) << 14; 425 } 426 break; 427 428 case CPUID_0000_0007: 429 regs[0] = 0; 430 regs[1] = 0; 431 regs[2] = 0; 432 regs[3] = 0; 433 434 /* leaf 0 */ 435 if (param == 0) { 436 cpuid_count(func, param, regs); 437 438 /* Only leaf 0 is supported */ 439 regs[0] = 0; 440 441 /* 442 * Expose known-safe features. 443 */ 444 regs[1] &= (CPUID_STDEXT_FSGSBASE | 445 CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE | 446 CPUID_STDEXT_AVX2 | CPUID_STDEXT_SMEP | 447 CPUID_STDEXT_BMI2 | 448 CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM | 449 CPUID_STDEXT_AVX512F | 450 CPUID_STDEXT_RDSEED | 451 CPUID_STDEXT_SMAP | 452 CPUID_STDEXT_AVX512PF | 453 CPUID_STDEXT_AVX512ER | 454 CPUID_STDEXT_AVX512CD | CPUID_STDEXT_SHA); 455 regs[2] = 0; 456 regs[3] &= CPUID_STDEXT3_MD_CLEAR; 457 458 /* Advertise RDPID if it is enabled. */ 459 error = vm_get_capability(vcpu, VM_CAP_RDPID, 460 &enable_rdpid); 461 if (error == 0 && enable_rdpid) 462 regs[2] |= CPUID_STDEXT2_RDPID; 463 464 /* Advertise INVPCID if it is enabled. 
			 */
			error = vm_get_capability(vcpu,
			    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
			if (error == 0 && enable_invpcid)
				regs[1] |= CPUID_STDEXT_INVPCID;
		}
		break;

	case CPUID_0000_0006:
		regs[0] = CPUTPM1_ARAT;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000A:
		/*
		 * Handle the access, but report 0 for
		 * all options
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000B:
		/*
		 * Intel processor topology enumeration
		 */
		if (vmm_is_intel()) {
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			if (param == 0) {
				logical_cpus = threads;
				width = log2(logical_cpus);
				level = CPUID_TYPE_SMT;
				x2apic_id = vcpu_id;
			}

			if (param == 1) {
				logical_cpus = threads * cores;
				width = log2(logical_cpus);
				level = CPUID_TYPE_CORE;
				x2apic_id = vcpu_id;
			}

			if (!cpuid_leaf_b || param >= 2) {
				width = 0;
				logical_cpus = 0;
				level = 0;
				x2apic_id = 0;
			}

			regs[0] = width & 0x1f;
			regs[1] = logical_cpus & 0xffff;
			regs[2] = (level << 8) | (param & 0xff);
			regs[3] = x2apic_id;
		} else {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
		}
		break;

	case CPUID_0000_000D:
		limits = vmm_get_xsave_limits();
		if (!limits->xsave_enabled) {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		}

		cpuid_count(func, param, regs);
		switch (param) {
		case 0:
			/*
			 * Only permit the guest to use bits
			 * that are active in the host in
			 * %xcr0.  Also, claim that the
			 * maximum save area size is
			 * equivalent to the host's current
			 * save area size.  Since this runs
			 * "inside" of vmrun(), it runs with
			 * the guest's xcr0, so the current
			 * save area size is correct as-is.
			 */
			regs[0] &= limits->xcr0_allowed;
			regs[2] = limits->xsave_max_size;
			regs[3] &= (limits->xcr0_allowed >> 32);
			break;
		case 1:
			/* Only permit XSAVEOPT. */
			regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		default:
			/*
			 * If the leaf is for a permitted feature,
			 * pass through as-is, otherwise return
			 * all zeroes.
			 */
			if (!(limits->xcr0_allowed & (1ul << param))) {
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
			}
			break;
		}
		break;

	case CPUID_0000_000F:
	case CPUID_0000_0010:
		/*
		 * Do not report any Resource Director Technology
		 * capabilities.  Exposing control of cache or memory
		 * controller resource partitioning to the guest is not
		 * at all sensible.
		 *
		 * This is already hidden at a high level by masking of
		 * leaf 0x7.  Even still, a guest may look here for
		 * detailed capability information.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_0015:
		/*
		 * Don't report CPU TSC/Crystal ratio and clock
		 * values since guests may use these to derive the
		 * local APIC frequency.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case 0x40000000:
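		/*
		 * Hypervisor vendor leaf: report the highest
		 * hypervisor CPUID leaf in %eax and the 12-byte
		 * vendor ID string across %ebx, %ecx and %edx.
		 */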
604 */ 605 regs[0] = 0; 606 regs[1] = 0; 607 regs[2] = 0; 608 regs[3] = 0; 609 break; 610 611 case 0x40000000: 612 regs[0] = CPUID_VM_HIGH; 613 bcopy(bhyve_id, ®s[1], 4); 614 bcopy(bhyve_id + 4, ®s[2], 4); 615 bcopy(bhyve_id + 8, ®s[3], 4); 616 break; 617 618 default: 619 default_leaf: 620 /* 621 * The leaf value has already been clamped so 622 * simply pass this through, keeping count of 623 * how many unhandled leaf values have been seen. 624 */ 625 atomic_add_long(&bhyve_xcpuids, 1); 626 cpuid_count(func, param, regs); 627 break; 628 } 629 630 /* 631 * CPUID clears the upper 32-bits of the long-mode registers. 632 */ 633 *rax = regs[0]; 634 *rbx = regs[1]; 635 *rcx = regs[2]; 636 *rdx = regs[3]; 637 638 return (1); 639 } 640 641 bool 642 vm_cpuid_capability(struct vcpu *vcpu, enum vm_cpuid_capability cap) 643 { 644 bool rv; 645 646 KASSERT(cap > 0 && cap < VCC_LAST, ("%s: invalid vm_cpu_capability %d", 647 __func__, cap)); 648 649 /* 650 * Simply passthrough the capabilities of the host cpu for now. 651 */ 652 rv = false; 653 switch (cap) { 654 case VCC_NO_EXECUTE: 655 if (amd_feature & AMDID_NX) 656 rv = true; 657 break; 658 case VCC_FFXSR: 659 if (amd_feature & AMDID_FFXSR) 660 rv = true; 661 break; 662 case VCC_TCE: 663 if (amd_feature2 & AMDID2_TCE) 664 rv = true; 665 break; 666 default: 667 panic("%s: unknown vm_cpu_capability %d", __func__, cap); 668 } 669 return (rv); 670 } 671 672 int 673 vm_rdmtrr(struct vm_mtrr *mtrr, u_int num, uint64_t *val) 674 { 675 switch (num) { 676 case MSR_MTRRcap: 677 *val = MTRR_CAP_WC | MTRR_CAP_FIXED | VMM_MTRR_VAR_MAX; 678 break; 679 case MSR_MTRRdefType: 680 *val = mtrr->def_type; 681 break; 682 case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7: 683 *val = mtrr->fixed4k[num - MSR_MTRR4kBase]; 684 break; 685 case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1: 686 *val = mtrr->fixed16k[num - MSR_MTRR16kBase]; 687 break; 688 case MSR_MTRR64kBase: 689 *val = mtrr->fixed64k; 690 break; 691 case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: { 692 u_int offset = num - MSR_MTRRVarBase; 693 if (offset % 2 == 0) { 694 *val = mtrr->var[offset / 2].base; 695 } else { 696 *val = mtrr->var[offset / 2].mask; 697 } 698 break; 699 } 700 default: 701 return (-1); 702 } 703 704 return (0); 705 } 706 707 int 708 vm_wrmtrr(struct vm_mtrr *mtrr, u_int num, uint64_t val) 709 { 710 switch (num) { 711 case MSR_MTRRcap: 712 /* MTRRCAP is read only */ 713 return (-1); 714 case MSR_MTRRdefType: 715 if (val & ~VMM_MTRR_DEF_MASK) { 716 /* generate #GP on writes to reserved fields */ 717 return (-1); 718 } 719 mtrr->def_type = val; 720 break; 721 case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7: 722 mtrr->fixed4k[num - MSR_MTRR4kBase] = val; 723 break; 724 case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1: 725 mtrr->fixed16k[num - MSR_MTRR16kBase] = val; 726 break; 727 case MSR_MTRR64kBase: 728 mtrr->fixed64k = val; 729 break; 730 case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: { 731 u_int offset = num - MSR_MTRRVarBase; 732 if (offset % 2 == 0) { 733 if (val & ~VMM_MTRR_PHYSBASE_MASK) { 734 /* generate #GP on writes to reserved fields */ 735 return (-1); 736 } 737 mtrr->var[offset / 2].base = val; 738 } else { 739 if (val & ~VMM_MTRR_PHYSMASK_MASK) { 740 /* generate #GP on writes to reserved fields */ 741 return (-1); 742 } 743 mtrr->var[offset / 2].mask = val; 744 } 745 break; 746 } 747 default: 748 return (-1); 749 } 750 751 return (0); 752 } 753