// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM paravirtual feature disablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

struct msr_data {
	uint32_t idx;
	const char *name;
};

#define TEST_MSR(msr) { .idx = msr, .name = #msr }
#define UCALL_PR_MSR 0xdeadbeef
#define PR_MSR(msr) ucall(UCALL_PR_MSR, 1, msr)

/*
 * KVM paravirtual msrs to test. Expect a #GP if any of these msrs are read or
 * written, as the KVM_CPUID_FEATURES leaf is cleared.
 */
static struct msr_data msrs_to_test[] = {
	TEST_MSR(MSR_KVM_SYSTEM_TIME),
	TEST_MSR(MSR_KVM_SYSTEM_TIME_NEW),
	TEST_MSR(MSR_KVM_WALL_CLOCK),
	TEST_MSR(MSR_KVM_WALL_CLOCK_NEW),
	TEST_MSR(MSR_KVM_ASYNC_PF_EN),
	TEST_MSR(MSR_KVM_STEAL_TIME),
	TEST_MSR(MSR_KVM_PV_EOI_EN),
	TEST_MSR(MSR_KVM_POLL_CONTROL),
	TEST_MSR(MSR_KVM_ASYNC_PF_INT),
	TEST_MSR(MSR_KVM_ASYNC_PF_ACK),
};

static void test_msr(struct msr_data *msr)
{
	uint64_t ignored;
	uint8_t vector;

	PR_MSR(msr);

	vector = rdmsr_safe(msr->idx, &ignored);
	GUEST_ASSERT_EQ(vector, GP_VECTOR);

	vector = wrmsr_safe(msr->idx, 0);
	GUEST_ASSERT_EQ(vector, GP_VECTOR);
}

struct hcall_data {
	uint64_t nr;
	const char *name;
};

#define TEST_HCALL(hc) { .nr = hc, .name = #hc }
#define UCALL_PR_HCALL 0xdeadc0de
#define PR_HCALL(hc) ucall(UCALL_PR_HCALL, 1, hc)

/*
 * KVM hypercalls to test. Expect -KVM_ENOSYS when called, as the corresponding
 * features have been cleared in KVM_CPUID_FEATURES.
 */
static struct hcall_data hcalls_to_test[] = {
	TEST_HCALL(KVM_HC_KICK_CPU),
	TEST_HCALL(KVM_HC_SEND_IPI),
	TEST_HCALL(KVM_HC_SCHED_YIELD),
};

static void test_hcall(struct hcall_data *hc)
{
	uint64_t r;

	PR_HCALL(hc);
	r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
	GUEST_ASSERT_EQ(r, -KVM_ENOSYS);
}

static void guest_main(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(msrs_to_test); i++) {
		test_msr(&msrs_to_test[i]);
	}

	for (i = 0; i < ARRAY_SIZE(hcalls_to_test); i++) {
		test_hcall(&hcalls_to_test[i]);
	}

	GUEST_DONE();
}

static void pr_msr(struct ucall *uc)
{
	struct msr_data *msr = (struct msr_data *)uc->args[0];

	pr_info("testing msr: %s (%#x)\n", msr->name, msr->idx);
}

static void pr_hcall(struct ucall *uc)
{
	struct hcall_data *hc = (struct hcall_data *)uc->args[0];

	pr_info("testing hcall: %s (%lu)\n", hc->name, hc->nr);
}

static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	while (true) {
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_PR_MSR:
			pr_msr(&uc);
			break;
		case UCALL_PR_HCALL:
			pr_hcall(&uc);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			return;
		case UCALL_DONE:
			return;
		}
	}
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID));

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);

	/*
	 * Tell KVM to enforce the guest's view of KVM_CPUID_FEATURES, i.e. to
	 * reject PV MSR accesses and hypercalls for features the guest's
	 * CPUID doesn't advertise.
	 */
	vcpu_enable_cap(vcpu, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);

	/* Clear the entire KVM_CPUID_FEATURES leaf to hide all PV features. */
	vcpu_clear_cpuid_entry(vcpu, KVM_CPUID_FEATURES);

	/* The guest needs an IDT to handle the expected #GPs on MSR accesses. */
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	enter_guest(vcpu);
	kvm_vm_free(vm);
}