// SPDX-License-Identifier: GPL-2.0-only
#include <asm/msr-index.h>

#include <stdint.h>

#include "kvm_util.h"
#include "processor.h"

/* Use HYPERVISOR for MSRs that are emulated unconditionally (as is HYPERVISOR). */
#define X86_FEATURE_NONE	X86_FEATURE_HYPERVISOR

struct kvm_msr {
	const struct kvm_x86_cpu_feature feature;
	const struct kvm_x86_cpu_feature feature2;
	const char *name;
	const u64 reset_val;
	const u64 write_val;
	const u64 rsvd_val;
	const u32 index;
};

#define ____MSR_TEST(msr, str, val, rsvd, reset, feat, f2)	\
{								\
	.index = msr,						\
	.name = str,						\
	.write_val = val,					\
	.rsvd_val = rsvd,					\
	.reset_val = reset,					\
	.feature = X86_FEATURE_ ##feat,				\
	.feature2 = X86_FEATURE_ ##f2,				\
}

#define __MSR_TEST(msr, str, val, rsvd, reset, feat)	\
	____MSR_TEST(msr, str, val, rsvd, reset, feat, feat)

#define MSR_TEST_NON_ZERO(msr, val, rsvd, reset, feat)	\
	__MSR_TEST(msr, #msr, val, rsvd, reset, feat)

#define MSR_TEST(msr, val, rsvd, feat)	\
	__MSR_TEST(msr, #msr, val, rsvd, 0, feat)

#define MSR_TEST2(msr, val, rsvd, feat, f2)	\
	____MSR_TEST(msr, #msr, val, rsvd, 0, feat, f2)

/*
 * Note, use a page aligned value for the canonical value so that the value
 * is compatible with MSRs that use bits 11:0 for things other than addresses.
 */
static const u64 canonical_val = 0x123456789000ull;

/*
 * Arbitrary value with bits set in every byte, but not all bits set.  This is
 * also a non-canonical value, but that's coincidental (any 64-bit value with
 * an alternating 0s/1s pattern will be non-canonical).
 */
static const u64 u64_val = 0xaaaa5555aaaa5555ull;
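
/*
 * Note, "canonical" refers to x86-64 linear addresses, i.e. bits 63:47 must
 * be sign-extended copies of bit 47 (assuming 48-bit virtual addresses).
 * NONCANONICAL (used below) is the selftests framework's stock non-canonical
 * value.
 */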

#define MSR_TEST_CANONICAL(msr, feat)	\
	__MSR_TEST(msr, #msr, canonical_val, NONCANONICAL, 0, feat)

/*
 * The main struct must be scoped to a function due to the use of structures to
 * define features.  For the global structure, allocate enough space for the
 * foreseeable future without getting too ridiculous, to minimize maintenance
 * costs (bumping the array size every time an MSR is added is really annoying).
 */
static struct kvm_msr msrs[128];
static int idx;

static bool ignore_unsupported_msrs;

static u64 fixup_rdmsr_val(u32 msr, u64 want)
{
	/*
	 * AMD CPUs drop bits 63:32 on some MSRs that Intel CPUs support.  KVM
	 * is supposed to emulate that behavior based on guest vendor model
	 * (which is the same as the host vendor model for this test).
	 */
	if (!host_cpu_is_amd)
		return want;

	switch (msr) {
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
	case MSR_TSC_AUX:
		return want & GENMASK_ULL(31, 0);
	default:
		return want;
	}
}

static void __rdmsr(u32 msr, u64 want)
{
	u64 val;
	u8 vec;

	vec = rdmsr_safe(msr, &val);
	__GUEST_ASSERT(!vec, "Unexpected %s on RDMSR(0x%x)", ex_str(vec), msr);

	__GUEST_ASSERT(val == want, "Wanted 0x%lx from RDMSR(0x%x), got 0x%lx",
		       want, msr, val);
}

static void __wrmsr(u32 msr, u64 val)
{
	u8 vec;

	vec = wrmsr_safe(msr, val);
	__GUEST_ASSERT(!vec, "Unexpected %s on WRMSR(0x%x, 0x%lx)",
		       ex_str(vec), msr, val);
	__rdmsr(msr, fixup_rdmsr_val(msr, val));
}
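
/*
 * For a supported MSR, verify the reset value, write and read back the test
 * value, then sync to the host so that it can verify the written value via
 * KVM_GET_MSR and restore the reset value (see host_test_msr()), which is
 * re-checked here after the sync.
 */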
static void guest_test_supported_msr(const struct kvm_msr *msr)
{
	__rdmsr(msr->index, msr->reset_val);
	__wrmsr(msr->index, msr->write_val);
	GUEST_SYNC(fixup_rdmsr_val(msr->index, msr->write_val));

	__rdmsr(msr->index, msr->reset_val);
}

static void guest_test_unsupported_msr(const struct kvm_msr *msr)
{
	u64 val;
	u8 vec;

	/*
	 * KVM's ABI with respect to ignore_msrs is a mess and largely beyond
	 * repair, just skip the unsupported MSR tests.
	 */
	if (ignore_unsupported_msrs)
		goto skip_wrmsr_gp;

	if (this_cpu_has(msr->feature2))
		goto skip_wrmsr_gp;

	vec = rdmsr_safe(msr->index, &val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on RDMSR(0x%x), got %s",
		       msr->index, ex_str(vec));

	vec = wrmsr_safe(msr->index, msr->write_val);
	__GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
		       msr->index, msr->write_val, ex_str(vec));

skip_wrmsr_gp:
	GUEST_SYNC(0);
}

static void guest_test_reserved_val(const struct kvm_msr *msr)
{
	/* Skip reserved value checks as well, ignore_msrs is truly a mess. */
	if (ignore_unsupported_msrs)
		return;

	/*
	 * If the CPU will truncate the written value (e.g. SYSENTER on AMD),
	 * expect success and a truncated value, not #GP.
	 */
	if (!this_cpu_has(msr->feature) ||
	    msr->rsvd_val == fixup_rdmsr_val(msr->index, msr->rsvd_val)) {
		u8 vec = wrmsr_safe(msr->index, msr->rsvd_val);

		__GUEST_ASSERT(vec == GP_VECTOR,
			       "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
			       msr->index, msr->rsvd_val, ex_str(vec));
	} else {
		__wrmsr(msr->index, msr->rsvd_val);
		__wrmsr(msr->index, msr->reset_val);
	}
}
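
/*
 * Loop forever; the host selects the MSR under test by updating idx and
 * re-running the vCPUs, and simply stops re-running them once all MSRs have
 * been tested.
 */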
static void guest_main(void)
{
	for (;;) {
		const struct kvm_msr *msr = &msrs[READ_ONCE(idx)];

		if (this_cpu_has(msr->feature))
			guest_test_supported_msr(msr);
		else
			guest_test_unsupported_msr(msr);

		if (msr->rsvd_val)
			guest_test_reserved_val(msr);

		GUEST_SYNC(msr->reset_val);
	}
}

static void host_test_msr(struct kvm_vcpu *vcpu, u64 guest_val)
{
	u64 reset_val = msrs[idx].reset_val;
	u32 msr = msrs[idx].index;
	u64 val;

	if (!kvm_cpu_has(msrs[idx].feature))
		return;

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == guest_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    guest_val, msr, val);

	vcpu_set_msr(vcpu, msr, reset_val);

	val = vcpu_get_msr(vcpu, msr);
	TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
		    reset_val, msr, val);
}
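
/* Run the vCPU until its next GUEST_SYNC, servicing printfs along the way. */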
static void do_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			host_test_msr(vcpu, uc.args[1]);
			return;
		case UCALL_PRINTF:
			pr_info("%s", uc.buffer);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
		case UCALL_DONE:
			TEST_FAIL("Unexpected UCALL_DONE");
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
}

static void vcpus_run(struct kvm_vcpu **vcpus, const int NR_VCPUS)
{
	int i;

	for (i = 0; i < NR_VCPUS; i++)
		do_vcpu_run(vcpus[i]);
}
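
/*
 * KVM sets the PEBS and BTS "unavailable" bits in MISC_ENABLE when a vCPU is
 * created, i.e. they are expected to be set in the reset value.
 */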
#define MISC_ENABLES_RESET_VAL (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

static void test_msrs(void)
{
	const struct kvm_msr __msrs[] = {
		MSR_TEST_NON_ZERO(MSR_IA32_MISC_ENABLE,
				  MISC_ENABLES_RESET_VAL | MSR_IA32_MISC_ENABLE_FAST_STRING,
				  MSR_IA32_MISC_ENABLE_FAST_STRING, MISC_ENABLES_RESET_VAL, NONE),
		MSR_TEST_NON_ZERO(MSR_IA32_CR_PAT, 0x07070707, 0, 0x7040600070406, NONE),

		/*
		 * TSC_AUX is supported if RDTSCP *or* RDPID is supported.  Add
		 * an entry for each feature so that TSC_AUX doesn't exist for
		 * the "unsupported" vCPU, and obviously to test both cases.
		 */
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDTSCP, RDPID),
		MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDPID, RDTSCP),

		MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, 0, NONE),
		/*
		 * Writing a non-canonical value to SYSENTER_{ESP,EIP}
		 * technically #GPs on Intel, but KVM doesn't emulate that
		 * behavior on emulated writes, i.e. this test would observe
		 * different behavior depending on whether the MSR writes are
		 * handled by hardware vs. KVM.  KVM's behavior is intended
		 * (though far from ideal), so don't bother testing
		 * non-canonical values.
		 */
		MSR_TEST(MSR_IA32_SYSENTER_ESP, canonical_val, 0, NONE),
		MSR_TEST(MSR_IA32_SYSENTER_EIP, canonical_val, 0, NONE),

		MSR_TEST_CANONICAL(MSR_FS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_KERNEL_GS_BASE, LM),
		MSR_TEST_CANONICAL(MSR_LSTAR, LM),
		MSR_TEST_CANONICAL(MSR_CSTAR, LM),
		MSR_TEST(MSR_SYSCALL_MASK, 0xffffffff, 0, LM),

		MSR_TEST_CANONICAL(MSR_IA32_PL0_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL0_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL1_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL1_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL2_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL2_SSP, canonical_val, canonical_val | 1, SHSTK),
		MSR_TEST_CANONICAL(MSR_IA32_PL3_SSP, SHSTK),
		MSR_TEST(MSR_IA32_PL3_SSP, canonical_val, canonical_val | 1, SHSTK),
	};

	/*
	 * Create two vCPUs, but run them on the same task, to validate KVM's
	 * context switching of MSR state.  Don't pin the task to a pCPU to
	 * also validate KVM's handling of cross-pCPU migration.
	 */
	const int NR_VCPUS = 2;
	struct kvm_vcpu *vcpus[NR_VCPUS];
	struct kvm_vm *vm;

	kvm_static_assert(sizeof(__msrs) <= sizeof(msrs));
	kvm_static_assert(ARRAY_SIZE(__msrs) <= ARRAY_SIZE(msrs));
	memcpy(msrs, __msrs, sizeof(__msrs));

	ignore_unsupported_msrs = kvm_is_ignore_msrs();

	vm = vm_create_with_vcpus(NR_VCPUS, guest_main, vcpus);

	sync_global_to_guest(vm, msrs);
	sync_global_to_guest(vm, ignore_unsupported_msrs);

	for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
		sync_global_to_guest(vm, idx);

		vcpus_run(vcpus, NR_VCPUS);
		vcpus_run(vcpus, NR_VCPUS);
	}

	kvm_vm_free(vm);
}

int main(void)
{
	test_msrs();
}