// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM paravirtual feature disablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

struct msr_data {
	uint32_t idx;
	const char *name;
};

#define TEST_MSR(msr) { .idx = msr, .name = #msr }
#define UCALL_PR_MSR 0xdeadbeef
#define PR_MSR(msr) ucall(UCALL_PR_MSR, 1, msr)

/*
 * KVM paravirtual msrs to test. Expect a #GP if any of these msrs are read or
 * written, as the KVM_CPUID_FEATURES leaf is cleared.
 */
static struct msr_data msrs_to_test[] = {
	TEST_MSR(MSR_KVM_SYSTEM_TIME),
	TEST_MSR(MSR_KVM_SYSTEM_TIME_NEW),
	TEST_MSR(MSR_KVM_WALL_CLOCK),
	TEST_MSR(MSR_KVM_WALL_CLOCK_NEW),
	TEST_MSR(MSR_KVM_ASYNC_PF_EN),
	TEST_MSR(MSR_KVM_STEAL_TIME),
	TEST_MSR(MSR_KVM_PV_EOI_EN),
	TEST_MSR(MSR_KVM_POLL_CONTROL),
	TEST_MSR(MSR_KVM_ASYNC_PF_INT),
	TEST_MSR(MSR_KVM_ASYNC_PF_ACK),
};

static void test_msr(struct msr_data *msr)
{
	uint64_t ignored;
	uint8_t vector;

	PR_MSR(msr);

	vector = rdmsr_safe(msr->idx, &ignored);
	GUEST_ASSERT_EQ(vector, GP_VECTOR);

	vector = wrmsr_safe(msr->idx, 0);
	GUEST_ASSERT_EQ(vector, GP_VECTOR);
}

struct hcall_data {
	uint64_t nr;
	const char *name;
};

#define TEST_HCALL(hc) { .nr = hc, .name = #hc }
#define UCALL_PR_HCALL 0xdeadc0de
#define PR_HCALL(hc) ucall(UCALL_PR_HCALL, 1, hc)

/*
 * KVM hypercalls to test. Expect -KVM_ENOSYS when called, as the corresponding
 * features have been cleared in KVM_CPUID_FEATURES.
 */
static struct hcall_data hcalls_to_test[] = {
	TEST_HCALL(KVM_HC_KICK_CPU),
	TEST_HCALL(KVM_HC_SEND_IPI),
	TEST_HCALL(KVM_HC_SCHED_YIELD),
};

static void test_hcall(struct hcall_data *hc)
{
	uint64_t r;

	PR_HCALL(hc);
	r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
	GUEST_ASSERT_EQ(r, -KVM_ENOSYS);
}

static void guest_main(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(msrs_to_test); i++) {
		test_msr(&msrs_to_test[i]);
	}

	for (i = 0; i < ARRAY_SIZE(hcalls_to_test); i++) {
		test_hcall(&hcalls_to_test[i]);
	}

	GUEST_DONE();
}

static void pr_msr(struct ucall *uc)
{
	struct msr_data *msr = (struct msr_data *)uc->args[0];

	pr_info("testing msr: %s (%#x)\n", msr->name, msr->idx);
}

static void pr_hcall(struct ucall *uc)
{
	struct hcall_data *hc = (struct hcall_data *)uc->args[0];

	pr_info("testing hcall: %s (%lu)\n", hc->name, hc->nr);
}

static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	while (true) {
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_PR_MSR:
			pr_msr(&uc);
			break;
		case UCALL_PR_HCALL:
			pr_hcall(&uc);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			return;
		case UCALL_DONE:
			return;
		}
	}
}
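
/*
 * Verify KVM's handling of KVM_FEATURE_PV_UNHALT: the feature can be set
 * while HLT-exiting is enabled, HLT exits cannot be disabled once a vCPU
 * exists, and KVM strips PV_UNHALT from the guest CPUID when HLT-exiting
 * is disabled, but only if the KVM PV signature is present.
 */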
static void test_pv_unhalt(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_cpuid_entry2 *ent;
	u32 kvm_sig_old;
	int r;

	if (!(kvm_check_cap(KVM_CAP_X86_DISABLE_EXITS) & KVM_X86_DISABLE_EXITS_HLT))
		return;

	pr_info("testing KVM_FEATURE_PV_UNHALT\n");

	/* KVM_PV_UNHALT test */
	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
	vcpu_set_cpuid_feature(vcpu, X86_FEATURE_KVM_PV_UNHALT);

	TEST_ASSERT(vcpu_cpuid_has(vcpu, X86_FEATURE_KVM_PV_UNHALT),
		    "Enabling X86_FEATURE_KVM_PV_UNHALT had no effect");

	/* Verify KVM disallows disabling exits after vCPU creation. */
	r = __vm_enable_cap(vm, KVM_CAP_X86_DISABLE_EXITS, KVM_X86_DISABLE_EXITS_HLT);
	TEST_ASSERT(r && errno == EINVAL,
		    "Disabling exits after vCPU creation didn't fail as expected");

	kvm_vm_free(vm);

	/* Verify that KVM clears PV_UNHALT from guest CPUID. */
	vm = vm_create(1);
	vm_enable_cap(vm, KVM_CAP_X86_DISABLE_EXITS, KVM_X86_DISABLE_EXITS_HLT);

	vcpu = vm_vcpu_add(vm, 0, NULL);
	TEST_ASSERT(!vcpu_cpuid_has(vcpu, X86_FEATURE_KVM_PV_UNHALT),
		    "vCPU created with PV_UNHALT set by default");

	vcpu_set_cpuid_feature(vcpu, X86_FEATURE_KVM_PV_UNHALT);
	TEST_ASSERT(!vcpu_cpuid_has(vcpu, X86_FEATURE_KVM_PV_UNHALT),
		    "PV_UNHALT set in guest CPUID when HLT-exiting is disabled");

	/*
	 * Clobber the KVM PV signature and verify KVM does NOT clear PV_UNHALT
	 * when KVM PV is not present, and DOES clear PV_UNHALT when switching
	 * back to the correct signature.
	 */
	ent = vcpu_get_cpuid_entry(vcpu, KVM_CPUID_SIGNATURE);
	kvm_sig_old = ent->ebx;
	ent->ebx = 0xdeadbeef;
	vcpu_set_cpuid(vcpu);

	vcpu_set_cpuid_feature(vcpu, X86_FEATURE_KVM_PV_UNHALT);
	TEST_ASSERT(vcpu_cpuid_has(vcpu, X86_FEATURE_KVM_PV_UNHALT),
		    "PV_UNHALT cleared when using bogus KVM PV signature");

	ent = vcpu_get_cpuid_entry(vcpu, KVM_CPUID_SIGNATURE);
	ent->ebx = kvm_sig_old;
	vcpu_set_cpuid(vcpu);

	TEST_ASSERT(!vcpu_cpuid_has(vcpu, X86_FEATURE_KVM_PV_UNHALT),
		    "PV_UNHALT still set in guest CPUID after restoring the KVM PV signature");

	/* FIXME: actually test KVM_FEATURE_PV_UNHALT feature */

	kvm_vm_free(vm);
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID));

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);

	/* Have KVM filter PV features and MSRs based on the guest's CPUID. */
	vcpu_enable_cap(vcpu, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);

	/* Hide all PV features from the guest by clearing KVM_CPUID_FEATURES. */
	vcpu_clear_cpuid_entry(vcpu, KVM_CPUID_FEATURES);

	enter_guest(vcpu);
	kvm_vm_free(vm);

	test_pv_unhalt();
}